Example #1
0
static int
tap_dev_poll(int unit, int events, struct lwp *l)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	int revents = 0;

	if (sc == NULL)
		return POLLERR;

	if (events & (POLLIN|POLLRDNORM)) {
		struct ifnet *ifp = &sc->sc_ec.ec_if;
		struct mbuf *m;
		int s;

		s = splnet();
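		/*
		 * Peek at the head of the send queue without dequeuing;
		 * a queued mbuf means the tap device has data to read.
		 */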
		IFQ_POLL(&ifp->if_snd, m);

		if (m != NULL)
			revents |= events & (POLLIN|POLLRDNORM);
		else {
			mutex_spin_enter(&sc->sc_kqlock);
			selrecord(l, &sc->sc_rsel);
			mutex_spin_exit(&sc->sc_kqlock);
		}
		splx(s);
	}
	revents |= events & (POLLOUT|POLLWRNORM);

	return (revents);
}
Example #2
0
void
imxenet_start(struct ifnet *ifp)
{
	struct imxenet_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	for (;;) {
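		/*
		 * Peek first; the packet is dequeued only after a successful
		 * encap, so a failed encap leaves it queued for a retry.
		 */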
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (imxenet_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

		ifp->if_opackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		m_freem(m_head);
	}
}
Example #3
0
ifnet_ret_t
pdq_ifstart(
    struct ifnet *ifp)
{
    pdq_softc_t *sc = (pdq_softc_t *) ((caddr_t) ifp - offsetof(pdq_softc_t, sc_arpcom.ac_if));
    struct mbuf *m;
    int tx = 0;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
        return;

    if (sc->sc_if.if_timer == 0)
        sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;

    if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
        sc->sc_if.if_flags |= IFF_OACTIVE;
        return;
    }
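    /* tx becomes 1 once at least one frame has been queued to the adapter. */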
    for (;; tx = 1) {
        IFQ_POLL(&ifp->if_snd, m);
        if (m == NULL)
            break;

        if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE) {
            ifp->if_flags |= IFF_OACTIVE;
            break;
        }

        IFQ_DEQUEUE(&ifp->if_snd, m);
    }
    if (tx)
        PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
}
Example #4
0
void
smsc_start(struct ifnet *ifp)
{
    struct smsc_softc	*sc = ifp->if_softc;
    struct mbuf		*m_head = NULL;

    /* Don't send anything if there is no link or controller is busy. */
    if ((sc->sc_flags & SMSC_FLAG_LINK) == 0) {
        return;
    }

    if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING)
        return;

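    /*
     * Single-packet transmit: one frame is encapsulated per call, and
     * IFF_OACTIVE is set below so further calls back off until it is
     * cleared.
     */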
    IFQ_POLL(&ifp->if_snd, m_head);
    if (m_head == NULL)
        return;

    if (smsc_encap(sc, m_head, 0)) {
        ifp->if_flags |= IFF_OACTIVE;
        return;
    }
    IFQ_DEQUEUE(&ifp->if_snd, m_head);

    bpf_mtap(ifp, m_head);

    ifp->if_flags |= IFF_OACTIVE;

    /*
     * Set a timeout in case the chip goes out to lunch.
     */
    ifp->if_timer = 5;
}
Example #5
0
void
smsc_start(struct ifnet *ifp)
{
	struct smsc_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;

	/* Don't send anything if there is no link or controller is busy. */
	if ((sc->sc_flags & SMSC_FLAG_LINK) == 0 ||
	    (ifp->if_flags & IFF_OACTIVE) != 0) {
		return;
	}

	IFQ_POLL(&ifp->if_snd, m_head);
	if (m_head == NULL)
		return;

	if (smsc_encap(sc, m_head, 0)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}
	IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	ifp->if_flags |= IFF_OACTIVE;
}
Example #6
0
static int
tap_dev_ioctl(int unit, u_long cmd, void *data, struct lwp *l)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);

	if (sc == NULL)
		return ENXIO;

	switch (cmd) {
	case FIONREAD:
		{
			struct ifnet *ifp = &sc->sc_ec.ec_if;
			struct mbuf *m;
			int s;

			s = splnet();
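			/* FIONREAD reports the length of the packet at the head of the queue, if any. */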
			IFQ_POLL(&ifp->if_snd, m);

			if (m == NULL)
				*(int *)data = 0;
			else
				*(int *)data = m->m_pkthdr.len;
			splx(s);
			return 0;
		} 
	case TIOCSPGRP:
	case FIOSETOWN:
		return fsetown(&sc->sc_pgid, cmd, data);
	case TIOCGPGRP:
	case FIOGETOWN:
		return fgetown(sc->sc_pgid, cmd, data);
	case FIOASYNC:
		if (*(int *)data)
			sc->sc_flags |= TAP_ASYNCIO;
		else
			sc->sc_flags &= ~TAP_ASYNCIO;
		return 0;
	case FIONBIO:
		if (*(int *)data)
			sc->sc_flags |= TAP_NBIO;
		else
			sc->sc_flags &= ~TAP_NBIO;
		return 0;
#ifdef OTAPGIFNAME
	case OTAPGIFNAME:
#endif
	case TAPGIFNAME:
		{
			struct ifreq *ifr = (struct ifreq *)data;
			struct ifnet *ifp = &sc->sc_ec.ec_if;

			strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
			return 0;
		}
	default:
		return ENOTTY;
	}
}
Example #7
0
void
kue_start(struct ifnet *ifp)
{
	struct kue_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;

	DPRINTFN(10,("%s: %s: enter\n", sc->kue_dev.dv_xname,__func__));

	if (sc->kue_dying)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	IFQ_POLL(&ifp->if_snd, m_head);
	if (m_head == NULL)
		return;

	if (kue_send(sc, m_head, 0)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
	/*
	 * If there's a BPF listener, bounce a copy of this frame
	 * to him.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 6;
}
Example #8
0
void
tsec_start(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifp->if_flags & IFF_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	idx = sc->sc_tx_prod;
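	/* Fill descriptors while the TO1 bit is clear, i.e. the slot is free. */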
	while ((sc->sc_txdesc[idx].td_status & TSEC_TX_TO1) == 0) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (tsec_encap(sc, m, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Now we are committed to transmit the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
Example #9
0
static int
tap_kqread(struct knote *kn, long hint)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int s, rv;

	KERNEL_LOCK(1, NULL);
	s = splnet();
	IFQ_POLL(&ifp->if_snd, m);

	if (m == NULL)
		kn->kn_data = 0;
	else
		kn->kn_data = m->m_pkthdr.len;
	splx(s);
	rv = (kn->kn_data != 0 ? 1 : 0);
	KERNEL_UNLOCK_ONE(NULL);
	return rv;
}
Example #10
0
void
cdcef_start(struct ifnet *ifp)
{
	struct cdcef_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;

	if(ifp->if_flags & IFF_OACTIVE)
		return;

	IFQ_POLL(&ifp->if_snd, m_head);
	if (m_head == NULL) {
		return;
	}

	if (sc->sc_listening == 0 || m_head->m_pkthdr.len > CDCEF_BUFSZ) {
		/*
		 * drop packet because receiver is not listening,
		 * or if packet is larger than xmit buffer
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		m_freem(m_head);
		return;
	}

	if (cdcef_encap(sc, m_head, 0)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
					
	ifp->if_flags |= IFF_OACTIVE;

	ifp->if_timer = 6;
}
Example #11
0
void
cas_start(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int32_t bix;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
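	/* A NULL sd_mbuf marks a free transmit descriptor slot. */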
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */
		if (cas_encap(sc, m, &bix)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		ifp->if_timer = 5;
	}

	sc->sc_tx_prod = bix;
}
Example #12
0
Static void
url_start(struct ifnet *ifp)
{
	struct url_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;

	DPRINTF(("%s: %s: enter, link=%d\n", USBDEVNAME(sc->sc_dev),
		 __func__, sc->sc_link));

	if (sc->sc_dying)
		return;

	if (!sc->sc_link)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	IFQ_POLL(&ifp->if_snd, m_head);
	if (m_head == NULL)
		return;

	if (url_send(sc, m_head, 0)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

	ifp->if_flags |= IFF_OACTIVE;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;
}
Example #13
0
static void
kue_start(struct ifnet *ifp)
{
	struct kue_softc	*sc = ifp->if_softc;
	struct mbuf		*m;

	DPRINTFN(10,("%s: %s: enter\n", device_xname(sc->kue_dev),__func__));

	if (sc->kue_dying)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	IFQ_POLL(&ifp->if_snd, m);
	if (m == NULL)
		return;

	if (kue_send(sc, m, 0)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	IFQ_DEQUEUE(&ifp->if_snd, m);

	/*
	 * If there's a BPF listener, bounce a copy of this frame
	 * to him.
	 */
	bpf_mtap(ifp, m);
	m_freem(m);

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 6;
}
Example #14
0
Static void
url_start(struct ifnet *ifp)
{
	struct url_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;

	DPRINTF(("%s: %s: enter, link=%d\n", device_xname(sc->sc_dev),
		 __func__, sc->sc_link));

	if (sc->sc_dying)
		return;

	if (!sc->sc_link)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	IFQ_POLL(&ifp->if_snd, m_head);
	if (m_head == NULL)
		return;

	if (url_send(sc, m_head, 0)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	IFQ_DEQUEUE(&ifp->if_snd, m_head);

	bpf_mtap(ifp, m_head);

	ifp->if_flags |= IFF_OACTIVE;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;
}
Example #15
0
File: if_sq.c Project: MarginC/kame
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr=
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
		    sc->sc_txdesc[lasttx].hdd_ctl += (ETHER_MIN_LEN - totlen);
		    totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
						       HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx),  1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
Example #16
0
/*
 * ae_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = 1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags, ifp->if_flags));


	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    device_xname(sc->sc_dev), ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		      BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
				ADCTL_SIZE1_SHIFT) |
				(nexttx == (AE_NTXDESC - 1) ?
				    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       ad_status:   0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf("       ad_ctl:      0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf("       ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf("       ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}
Example #17
0
/*
 * Start output on interface.
 */
void
nistart(struct ifnet *ifp)
{
	struct ni_softc *sc = ifp->if_softc;
	struct ni_dg *data;
	struct ni_bbd *bdp;
	struct mbuf *m, *m0;
	int i, cnt, res, mlen;

	if (ifp->if_flags & IFF_OACTIVE)
		return;
#ifdef DEBUG
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: nistart\n", device_xname(sc->sc_dev));
#endif

	while (fqb->nf_dforw) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			break;

		data = REMQHI(&fqb->nf_dforw);
		if ((int)data == Q_EMPTY) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, cnt = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				cnt++;
		if (cnt > NTXFRAGS)
			panic("nistart"); /* XXX */

		bpf_mtap(ifp, m);
		bdp = &bbd[(data->bufs[0]._index & 0x7fff)];
		for (m0 = m, i = 0, mlen = 0; m0; m0 = m0->m_next) {
			if (m0->m_len == 0)
				continue;
			bdp->nb_status = (mtod(m0, u_int32_t) & NIBD_OFFSET) |
			    NIBD_VALID;
			bdp->nb_pte = (u_int32_t)kvtopte(mtod(m0, void *));
			bdp->nb_len = m0->m_len;
			data->bufs[i]._offset = 0;
			data->bufs[i]._len = bdp->nb_len;
			data->bufs[i]._index |= NIDG_CHAIN;
			mlen += bdp->nb_len;
			bdp++;
			i++;
		}
		data->nd_opcode = BVP_DGRAM;
		data->nd_pad3 = 1;
		data->nd_ptdbidx = 1;
		data->nd_len = 10 + i * 8;
		data->bufs[i - 1]._index &= ~NIDG_CHAIN;
		data->nd_cmdref = (u_int32_t)m;
#ifdef DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: sending %d bytes (%d segments)\n",
			    device_xname(sc->sc_dev), mlen, i);
#endif

		res = INSQTI(data, &gvp->nc_forw0);
		if (res == Q_EMPTY) {
			WAITREG(NI_PCR, PCR_OWN);
			NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
		}
	}
}
Example #18
0
void
vnet_start_desc(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_map *map = sc->sc_lm;
	struct vnet_desc_msg dm;
	struct mbuf *m;
	paddr_t pa;
	caddr_t buf;
	u_int prod, count;

	for (;;) {
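		/*
		 * Peek at the next packet; it is dequeued only after ring
		 * space is confirmed and a transmit buffer has been filled.
		 */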
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}
		m_copydata(m, 0, m->m_pkthdr.len, buf);
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_int(&map->lm_count);

		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		bzero(&dm, sizeof(dm));
		dm.tag.type = VIO_TYPE_DATA;
		dm.tag.stype = VIO_SUBTYPE_INFO;
		dm.tag.stype_env = VIO_DESC_DATA;
		dm.tag.sid = sc->sc_local_sid;
		dm.seq_no = sc->sc_seq_no++;
		dm.desc_handle = sc->sc_tx_prod;
		dm.nbytes = max(m->m_pkthdr.len, 60);
		dm.ncookies = 1;
		dm.cookie[0].addr =
			map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		dm.cookie[0].size = 2048;
		vnet_sendmsg(sc, &dm, sizeof(dm));

		sc->sc_tx_prod++;
		sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}
}
Example #19
0
/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	struct iee_tbd *tbd;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueuing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBD than we support.
			 * Copy the packet into a mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    device_xname(sc->sc_dev));
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			tbd = SC_TBD(sc, sc->sc_next_tbd + n);
			tbd->tbd_tb_addr =
			    IEE_SWAPA32(sc->sc_tx_map[t]->dm_segs[n].ds_addr);
			tbd->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			tbd->tbd_link_addr =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_tbd_off +
			    sc->sc_tbd_sz * (sc->sc_next_tbd + n + 1)));
		}
		SC_TBD(sc, sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
		    sc->sc_tbd_off + sc->sc_next_tbd * sc->sc_tbd_sz,
		    sc->sc_tbd_sz * sc->sc_tx_map[t]->dm_nsegs,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
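		/*
		 * Peek ahead: if the send queue is now empty, close the
		 * command chain (EL) and request an interrupt; otherwise
		 * link in another transmit command.
		 */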
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
		/* Pass packet to bpf if someone listens. */
		bpf_mtap(ifp, sc->sc_tx_mbuf[t]);
	}
Example #20
0
/* Start packet transmission on the interface. */
void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf    *m0;
	bus_dmamap_t    dmamap;
	int             txstart;
	int             txsfree;
	int             newpkts = 0;
	int             error;

	/*
         * do not start another if currently transmitting, and more
         * descriptors(tx slots) are needed for next packet.
         */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* determine number of descriptors available */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;
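	/* One descriptor is kept unused so a full ring is distinguishable from an empty one. */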

	/*
         * Loop through the send queue, setting up transmit descriptors
         * until we drain the queue, or use up all available transmit
         * descriptors.
         */
	while (txsfree > 0) {
		int             seg;

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* get the transmit slot dma map */
		dmamap = sc->bce_cdata.bce_tx_map[sc->bce_txsnext];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. If the packet will not fit,
		 * it will be dropped. If short on resources, it will
		 * be tried again later.
		 */
		error = bus_dmamap_load_mbuf(sc->bce_dmatag, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			printf("%s: Tx packet consumes too many DMA segments, "
			    "dropping...\n", sc->bce_dev.dv_xname);
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			m_freem(m0);
			ifp->if_oerrors++;
			continue;
		} else if (error) {
			/* short on resources, come back later */
			printf("%s: unable to load Tx buffer, error = %d\n",
			    sc->bce_dev.dv_xname, error);
			break;
		}
		/* If not enough descriptors available, try again later */
		if (dmamap->dm_nsegs > txsfree) {
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->bce_dmatag, dmamap);
			break;
		}
		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */

		/* So take it off the queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/* save the pointer so it can be freed later */
		sc->bce_cdata.bce_tx_chain[sc->bce_txsnext] = m0;

		/* Sync the data DMA map. */
		bus_dmamap_sync(sc->bce_dmatag, dmamap, 0, dmamap->dm_mapsize,
				BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			u_int32_t ctrl;

			ctrl = dmamap->dm_segs[seg].ds_len & CTRL_BC_MASK;
			if (seg == 0)
				ctrl |= CTRL_SOF;
			if (seg == dmamap->dm_nsegs - 1)
				ctrl |= CTRL_EOF;
			if (sc->bce_txsnext == BCE_NTXDESC - 1)
				ctrl |= CTRL_EOT;
			ctrl |= CTRL_IOC;
			sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
			sc->bce_tx_ring[sc->bce_txsnext].addr =
			    htole32(dmamap->dm_segs[seg].ds_addr + 0x40000000);	/* MAGIC */
			if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
				sc->bce_txsnext = 0;
			else
				sc->bce_txsnext++;
			txsfree--;
		}
		/* sync descriptors being used */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
			  sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
			     sizeof(struct bce_dma_slot) * dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
			     sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif				/* NBPFILTER > 0 */
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
Example #21
0
void
smap_start(struct ifnet *ifp)
{
	struct smap_softc *sc = ifp->if_softc;
	struct smap_desc *d;
	struct mbuf *m0, *m;
	u_int8_t *p, *q;
	u_int32_t *r;
	int i, sz, pktsz;
	u_int16_t fifop;
	u_int16_t r16;

	KDASSERT(ifp->if_flags & IFF_RUNNING);
	FUNC_ENTER();

	while (1) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			goto end;

		pktsz = m0->m_pkthdr.len;
		KDASSERT(pktsz <= ETHER_MAX_LEN - ETHER_CRC_LEN);
		sz = ROUND4(pktsz);
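		/* The FIFO is written one 32-bit word at a time, so round the copy length up to a multiple of 4. */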

		if (sz > sc->tx_buf_freesize ||
		    sc->tx_desc_cnt >= SMAP_DESC_MAX ||
		    emac3_tx_done() != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			goto end;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		KDASSERT(m0 != NULL);
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);

		p = (u_int8_t *)sc->tx_buf;
		q = p + sz;
		/* copy to temporary buffer area */
		for (m = m0; m != 0; m = m->m_next) {
			memcpy(p, mtod(m, void *), m->m_len);
			p += m->m_len;
		}
		m_freem(m0);

		/* zero padding area */
		for (; p < q; p++)
			*p = 0;

		/* put to FIFO */
		fifop = sc->tx_fifo_ptr;
		KDASSERT((fifop & 3) == 0);
		_reg_write_2(SMAP_TXFIFO_PTR_REG16, fifop);
		sc->tx_fifo_ptr = (fifop + sz) & 0xfff;

		r = sc->tx_buf;
		for (i = 0; i < sz; i += sizeof(u_int32_t))
			*(volatile u_int32_t *)SMAP_TXFIFO_DATA_REG = *r++;
		_wbflush();

		/* put FIFO to EMAC3 */
		d = &sc->tx_desc[sc->tx_start_index];
		KDASSERT((d->stat & SMAP_TXDESC_READY) == 0);

		d->sz = pktsz;
		d->ptr = fifop + SMAP_TXBUF_BASE;
		d->stat = SMAP_TXDESC_READY | SMAP_TXDESC_GENFCS |
		    SMAP_TXDESC_GENPAD;
		_wbflush();

		sc->tx_buf_freesize -= sz;
		sc->tx_desc_cnt++;
		sc->tx_start_index = (sc->tx_start_index + 1) & 0x3f;
		_reg_write_1(SMAP_TXFIFO_FRAME_INC_REG8, 1);

		emac3_tx_kick();
		r16 = _reg_read_2(SPD_INTR_ENABLE_REG16);
		if ((r16 & SPD_INTR_TXDNV) == 0) {
			r16 |= SPD_INTR_TXDNV;
			_reg_write_2(SPD_INTR_ENABLE_REG16, r16);
		}
	}
 end:
	/* set watchdog timer */
	ifp->if_timer = 5;

	FUNC_EXIT();
}
Example #22
0
void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/*
	 * performance tuning
	 * presend iobdma request 
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		goto last;

	/* XXX assume that OCTEON doesn't buffer packets */
	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) {
		/* dequeue and drop them */
		while (1) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
#if 0
#ifdef DDB
			m_print(m, "cd", printf);
#endif
			printf("%s: drop\n", sc->sc_dev.dv_xname);
#endif
			m_freem(m);
			IF_DROP(&ifp->if_snd);
		}
		goto last;
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		octeon_eth_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent buffer
		 * and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			return;
		}
		/* XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		OCTEON_ETH_TAP(ifp, m, BPF_DIRECTION_OUT);

		/* XXX */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed to transmit packet\n",
			    sc->sc_dev.dv_xname);
		} else {
			sc->sc_soft_req_cnt++;
		}
		if (sc->sc_flush)
			octeon_eth_send_queue_flush_sync(sc);
		/* XXX */

		/*
		 * send next iobdma request 
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

/*
 * XXXSEIL
 * Don't schedule send-buffer-free callout every time - those buffers are freed
 * by "free tick".  This makes some packets like NFS slower, but it normally
 * doesn't happen on SEIL.
 */
#ifdef OCTEON_ETH_USENFS
	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
		int timo;

		/* ??? */
		timo = hz - (100 * sc->sc_ext_callback_cnt);
		if (timo < 10)
			timo = 10;
		callout_schedule(&sc->sc_tick_free_ch, timo);
	}
#endif

last:
	octeon_eth_send_queue_flush_fetch(sc);
}
Example #23
0
/*
 * Start output on interface.
 */
void
zestart(struct ifnet *ifp)
{
	struct ze_softc *sc = ifp->if_softc;
	struct ze_cdata *zc = sc->sc_zedata;
	paddr_t	buffer;
	struct mbuf *m;
	int nexttx, starttx;
	int len, i, totlen, error;
	int old_inq = sc->sc_inq;
	uint16_t orword, tdr = 0;	/* first descriptor stays chip-unowned until the chain is built */
	bus_dmamap_t map;

	while (sc->sc_inq < (TXDESCS - 1)) {

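		/* Flush any pending setup packet ahead of data frames. */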
		if (sc->sc_setup) {
			ze_setup(sc);
			continue;
		}
		nexttx = sc->sc_nexttx;
		IFQ_POLL(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		map = sc->sc_xmtmap[nexttx];
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "zestart: load_mbuf failed: %d", error);
			goto out;
		}

		if (map->dm_nsegs >= TXDESCS)
			panic("zestart"); /* XXX */

		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
			bus_dmamap_unload(sc->sc_dmat, map);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		orword = ZE_TDES1_FS;
		starttx = nexttx;
		for (i = 0; i < map->dm_nsegs; i++) {
			buffer = map->dm_segs[i].ds_addr;
			len = map->dm_segs[i].ds_len;

			KASSERT(len > 0);

			totlen += len;
			/* Last segment: mark LS, store the mbuf, and request an interrupt every 3/4 of the ring. */
			if (totlen == m->m_pkthdr.len) {
				sc->sc_txcnt += map->dm_nsegs;
				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
					orword |= ZE_TDES1_IC;
					sc->sc_txcnt = 0;
				}
				orword |= ZE_TDES1_LS;
				sc->sc_txmbuf[nexttx] = m;
			}
			zc->zc_xmit[nexttx].ze_bufsize = len;
			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
			zc->zc_xmit[nexttx].ze_tdes1 = orword;
			zc->zc_xmit[nexttx].ze_tdr = tdr;

			if (++nexttx == TXDESCS)
				nexttx = 0;
			orword = 0;
			tdr = ZE_TDR_OW;
		}

		sc->sc_inq += map->dm_nsegs;

		IFQ_DEQUEUE(&ifp->if_snd, m);
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("zestart: len fault");
#endif
		/*
		 * Turn ownership of the packet over to the device.
		 */
		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);
		sc->sc_nexttx = nexttx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (old_inq < sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
}
Example #24
0
/*
 * sonic_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
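			/* Pad short frames with an extra fragment pointing at the zeroed pad area. */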
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up. */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
Example #25
0
void
mec_start(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t txdaddr;
	int error, firsttx, nexttx, opending;
	int len, bufoff, buflen, unaligned, txdlen;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the first transmit descriptor.
	 */
	opending = sc->sc_txpending;
	firsttx = MEC_NEXTTX(sc->sc_txlast);

	DPRINTF(MEC_DEBUG_START,
	    ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));

	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txpending == MEC_NTXDESC) {
			break;
		}

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = MEC_NEXTTX(sc->sc_txlast);
		txd = &sc->sc_txdesc[nexttx];
		txs = &sc->sc_txsoft[nexttx];

		buflen = 0;
		bufoff = 0;
		txdaddr = 0; /* XXX gcc */
		txdlen = 0; /* XXX gcc */

		len = m0->m_pkthdr.len;

		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: len = %d, nexttx = %d\n", len, nexttx));

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (len < ETHER_PAD_LEN) {
			/*
			 * I don't know if MEC chip does auto padding,
			 * so if the packet is small enough,
			 * just copy it to the buffer in txdesc.
			 * Maybe this is the simple way.
			 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));

			bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
			m_copydata(m0, 0, m0->m_pkthdr.len,
			    txd->txd_buf + bufoff);
			memset(txd->txd_buf + bufoff + len, 0,
			    ETHER_PAD_LEN - len);
			len = buflen = ETHER_PAD_LEN;

			txs->txs_flags = MEC_TXS_TXDBUF | buflen;
		} else {
			/*
			 * If the packet won't fit the buffer in txdesc,
			 * we have to use concatenate pointer to handle it.
			 * While MEC can handle up to three segments to
			 * concatenate, MEC requires that both the second and
			 * third segments have to be 8 byte aligned.
			 * Since it's unlikely for mbuf clusters, we use
			 * only the first concatenate pointer. If the packet
			 * doesn't fit in one DMA segment, allocate new mbuf
			 * and copy the packet to it.
			 *
			 * Besides, if the start address of the first segment
			 * is not 8-byte aligned, that part has to be copied
			 * to the txdesc buffer. (XXX see below comments)
			 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));

			dmamap = txs->txs_dmamap;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
			    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				struct mbuf *m;

				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: re-allocating mbuf\n"));
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					printf("%s: unable to allocate "
					    "TX mbuf\n", sc->sc_dev.dv_xname);
					break;
				}
				if (len > (MHLEN - ETHER_ALIGN)) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						printf("%s: unable to allocate "
						    "TX cluster\n",
						    sc->sc_dev.dv_xname);
						m_freem(m);
						break;
					}
				}
				/*
				 * Each packet has the Ethernet header, so
				 * in many cases the header isn't 4-byte aligned
				 * and data after the header is 4-byte aligned.
				 * Thus adding 2-byte offset before copying to
				 * new mbuf avoids unaligned copy and this may
				 * improve performance.
				 * As noted above, unaligned part has to be
				 * copied to txdesc buffer so this may cause
				 * extra copy ops, but for now MEC always
				 * requires some data in txdesc buffer,
				 * so we always have to copy some data anyway.
				 */
				m->m_data += ETHER_ALIGN;
				m_copydata(m0, 0, len, mtod(m, caddr_t));
				m->m_pkthdr.len = m->m_len = len;
				m_freem(m0);
				m0 = m;
				error = bus_dmamap_load_mbuf(sc->sc_dmat,
				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
				if (error) {
					printf("%s: unable to load TX buffer, "
					    "error = %d\n",
					    sc->sc_dev.dv_xname, error);
					m_freem(m);
					break;
				}
			}

			/* Handle unaligned part. */
			txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr);
			txs->txs_flags = MEC_TXS_TXDPTR1;
			unaligned =
			    dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1);
			DPRINTF(MEC_DEBUG_START,
			    ("mec_start: ds_addr = 0x%x, unaligned = %d\n",
			    (u_int)dmamap->dm_segs[0].ds_addr, unaligned));
			if (unaligned != 0) {
				buflen = MEC_TXD_ALIGN - unaligned;
				bufoff = MEC_TXD_BUFSTART(buflen);
				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: unaligned, "
				    "buflen = %d, bufoff = %d\n",
				    buflen, bufoff));
				memcpy(txd->txd_buf + bufoff,
				    mtod(m0, caddr_t), buflen);
				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
			}
#if 1
			else {
				/*
				 * XXX needs hardware info XXX
				 * It seems MEC always requires some data
				 * in txd_buf[] even if buffer is
				 * 8-byte aligned otherwise DMA abort error
				 * occurs later...
				 */
				buflen = MEC_TXD_ALIGN;
				bufoff = MEC_TXD_BUFSTART(buflen);
				memcpy(txd->txd_buf + bufoff,
				    mtod(m0, caddr_t), buflen);
				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: aligned, "
				    "buflen = %d, bufoff = %d\n",
				    buflen, bufoff));
				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
				txdaddr += MEC_TXD_ALIGN;
			}
#endif
			txdlen  = len - buflen;
			DPRINTF(MEC_DEBUG_START,
			    ("mec_start: txdaddr = 0x%llx, txdlen = %d\n",
			    txdaddr, txdlen));

			/*
			 * Sync the DMA map for TX mbuf.
			 *
			 * XXX unaligned part doesn't have to be sync'ed,
			 *     but it's harmless...
			 */
			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
			    dmamap->dm_mapsize,	BUS_DMASYNC_PREWRITE);
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif

		/*
		 * Setup the transmit descriptor.
		 */

		/* TXINT bit will be set later on the last packet. */
		txd->txd_cmd = (len - 1);
		/* But also set TXINT bit on a half of TXDESC. */
		if (sc->sc_txpending == (MEC_NTXDESC / 2))
			txd->txd_cmd |= MEC_TXCMD_TXINT;

		if (txs->txs_flags & MEC_TXS_TXDBUF)
			txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen);
		if (txs->txs_flags & MEC_TXS_TXDPTR1) {
			txd->txd_cmd |= MEC_TXCMD_PTR1;
			txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr;
			/*
			 * Store a pointer to the packet so we can
			 * free it later.
			 */
			txs->txs_mbuf = m0;
		} else {
			txd->txd_ptr[0] = 0;
			/*
			 * In this case all data are copied to buffer in txdesc,
			 * we can free TX mbuf here.
			 */
			m_freem(m0);
		}

		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: txd_cmd = 0x%llx, txd_ptr = 0x%llx\n",
		    txd->txd_cmd, txd->txd_ptr[0]));
		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n",
		    len, len, buflen, buflen));

		/* Sync TX descriptor. */
		MEC_TXDESCSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the TX pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;
	}

	if (sc->sc_txpending == MEC_NTXDESC) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * Cause a TX interrupt to happen on the last packet
		 * we enqueued.
		 */
		sc->sc_txdesc[sc->sc_txlast].txd_cmd |= MEC_TXCMD_TXINT;
		MEC_TXCMDSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start TX. */
		bus_space_write_8(st, sh, MEC_TX_RING_PTR,
		    MEC_NEXTTX(sc->sc_txlast));

		/*
		 * If the transmitter was idle,
		 * reset the txdirty pointer and re-enable TX interrupt.
		 */
		if (opending == 0) {
			sc->sc_txdirty = firsttx;
			bus_space_write_8(st, sh, MEC_TX_ALIAS,
			    MEC_TX_ALIAS_INT_ENABLE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
Example #26
0
/*
 * ste_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
ste_start(struct ifnet *ifp)
{
	struct ste_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ste_descsoft *ds;
	struct ste_tfd *tfd;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, seg, totlen;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of pending transmissions
	 * and the current last descriptor in the list.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < STE_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = STE_NEXTTX(sc->sc_txlast);
		tfd = &sc->sc_txdescs[nexttx];
		ds = &sc->sc_txsoft[nexttx];

		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(&sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", device_xname(&sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(&sc->sc_dev), error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the fragment list. */
		for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) {
			tfd->tfd_frags[seg].frag_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			tfd->tfd_frags[seg].frag_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
			totlen += dmamap->dm_segs[seg].ds_len;
		}
		tfd->tfd_frags[seg - 1].frag_len |= htole32(FRAG_LAST);

		/* Initialize the descriptor. */
		tfd->tfd_next = htole32(STE_CDTXADDR(sc, nexttx));
		tfd->tfd_control = htole32(TFD_FrameId(nexttx) | (totlen & 3));

		/* Sync the descriptor. */
		STE_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		ds->ds_mbuf = m0;

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}
Example #27
0
/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
 * until we drain the queues, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		vlan++;
		if (vlan == SW_DEVS)
			vlan = 0;
		i = vlan;
		for (;;) {
			ifp = &sc->sc_ethercom[i].ec_if;
			if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) ==
			    IFF_RUNNING) {
				/* Grab a packet off the queue. */
				IFQ_POLL(&ifp->if_snd, m0);
				if (m0 != NULL)
					break;
			}
			i++;
			if (i == SW_DEVS)
				i = 0;
			if (i == vlan)
				return;
		}
		vlan = i;
		m = NULL;

		/* Get a spare descriptor. */
		if (sc->sc_txfree == 0) {
			/* No more slots left; notify upper layer. */
			ifp->if_flags |= IFF_OACTIVE;
			ADMSW_EVCNT_INCR(&sc->sc_ev_txstall);
			break;
		}
		nexttx = sc->sc_txnext;
		desc = &sc->sc_txldescs[nexttx];
		ds = &sc->sc_txlsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (m->m_pkthdr.len < ETHER_MIN_LEN) {
				if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
					panic("admsw_start: M_TRAILINGSPACE\n");
				memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev), error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2)
			panic("admsw_start: dm_nsegs == %d\n", dmamap->dm_nsegs);
		desc->data = dmamap->dm_segs[0].ds_addr;
		desc->len = len = dmamap->dm_segs[0].ds_len;
		if (dmamap->dm_nsegs > 1) {
			len += dmamap->dm_segs[1].ds_len;
			desc->cntl = dmamap->dm_segs[1].ds_addr | ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			desc->status |= ADM5120_DMA_CSUM;
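		/* Mark the end of the ring and hand the descriptor over. */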
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

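		/* Kick the switch: tell it a new descriptor is ready. */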
		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n",nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_ethercom[0].ec_if.if_timer = 5;
	}
}
Example #28
0
/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

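		/* A pending address-filter setup frame goes out first. */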
		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m);
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/*
			 * Word alignment: the device transfers 16-bit
			 * words, so odd start/end addresses are flagged
			 * with QE_ODDBEGIN/QE_ODDEND, the length is
			 * rounded up to a whole word, and the length
			 * register takes a negative word count.
			 */
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
out:
	if (sc->sc_inq)
		ifp->if_timer = 5;	/* in case the transmit logic dies */
	splx(s);
}
Example #29
0
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	uint32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example). We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
Example #30
0
void
vnet_start(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct mbuf *m;
	paddr_t pa;
	caddr_t buf;
	uint64_t tx_head, tx_tail, tx_state;
	u_int start, prod, count;
	int err;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * We cannot transmit packets until a VIO connection has been
	 * established.
	 */
	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
		return;

	/*
	 * Make sure there is room in the LDC transmit queue to send a
	 * DRING_DATA message.
	 */
	err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (err != H_EOK)
		return;
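	/*
	 * Advance the tail by one packet, wrapping modulo the queue
	 * size; if it catches up with the head, the queue is full.
	 */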
	tx_tail += sizeof(struct ldc_pkt);
	tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(struct ldc_pkt)) - 1);
	if (tx_tail == tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	if (sc->sc_xfer_mode == VIO_DESC_MODE) {
		vnet_start_desc(ifp);
		return;
	}

	start = prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
	while (sc->sc_vd->vd_desc[prod].hdr.dstate == VIO_DESC_FREE) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		m_copydata(m, 0, m->m_pkthdr.len, buf + VNET_ETHER_ALIGN);
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
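		/*
		 * Find a free slot in the shared LDC map table and
		 * enter the buffer's physical address so the peer can
		 * copy the frame out of it.
		 */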
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_int(&map->lm_count);

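		/*
		 * Fill in the descriptor: pad runts to the 60-byte
		 * Ethernet minimum (without FCS); the cookie address
		 * encodes the map-table slot and the page offset.
		 */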
		sc->sc_vd->vd_desc[prod].nbytes = max(m->m_pkthdr.len, 60);
		sc->sc_vd->vd_desc[prod].ncookies = 1;
		sc->sc_vd->vd_desc[prod].cookie[0].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[prod].cookie[0].size = 2048;
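		/* Make the descriptor body visible before marking it ready. */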
		membar_producer();
		sc->sc_vd->vd_desc[prod].hdr.dstate = VIO_DESC_READY;

		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		sc->sc_tx_prod++;
		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}

	membar_producer();

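	/*
	 * Kick the peer with a DRING_DATA message only if it is not
	 * already busy processing the ring.
	 */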
	if (start != prod && sc->sc_peer_state != VIO_DP_ACTIVE) {
		vnet_send_dring_data(sc, start);
		ifp->if_timer = 5;
	}
}
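
All of the drivers above share the same two-phase idiom: IFQ_POLL peeks at the head of the send queue without removing the packet, so a driver that runs out of descriptors or DMA resources can back off and leave the packet queued for the next call; only once transmission is committed does IFQ_DEQUEUE actually remove it. Below is a minimal sketch of that pattern; xx_softc and xx_encap() are hypothetical stand-ins for a real driver's state and descriptor-setup routine, not an existing API.

void
xx_start(struct ifnet *ifp)
{
	struct xx_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/* Nothing to do unless the interface is up and not already busy. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* Peek only: the packet stays on the queue. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * If the hardware cannot take the packet right now,
		 * back off; the packet is still queued and will be
		 * retried on the next call.
		 */
		if (xx_encap(sc, m)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Committed: remove the packet from the queue for good. */
		IFQ_DEQUEUE(&ifp->if_snd, m);
	}
}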