Example 1
int
tsec_encap(struct tsec_softc *sc, struct mbuf *m, int *idx)
{
	struct tsec_desc *txd;
	bus_dmamap_t map;
	int cur, frag, i;
	uint16_t status;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return (ENOBUFS);

	if (map->dm_nsegs > (TSEC_NTXDESC - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		status = txd->td_status & TSEC_TX_W;
		status |= TSEC_TX_TO1;
		if (i == (map->dm_nsegs - 1))
			status |= TSEC_TX_L;
		txd->td_len = map->dm_segs[i].ds_len;
		txd->td_addr = map->dm_segs[i].ds_addr;
		__asm volatile("eieio" ::: "memory");
		txd->td_status = status | TSEC_TX_R | TSEC_TX_I | TSEC_TX_TC;

		bus_dmamap_sync(sc->sc_dmat, TSEC_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (status & TSEC_TX_W) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);

		tsec_write(sc, TSEC_TSTAT, TSEC_TSTAT_THLT);
	}

	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = frag;

	return (0);
}
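
The tsec example above shows the sequence that recurs in almost every transmit path in this list: load the mbuf chain with bus_dmamap_load_mbuf(9), check that the segment count fits the free descriptors, sync the payload, fill one descriptor per segment, and unload on any failure. Below is a condensed, driver-neutral sketch of that sequence. It uses the four-argument NetBSD/OpenBSD form of bus_dmamap_load_mbuf() seen here; the softc fields and the descriptor-fill step are hypothetical placeholders, not taken from any of the drivers in these examples.

/* Minimal Tx encapsulation skeleton (illustrative only). */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

struct xx_softc {
	bus_dma_tag_t	sc_dmat;	/* DMA tag from attach */
	int		sc_tx_free;	/* free Tx descriptors left */
};

int
xx_encap(struct xx_softc *sc, bus_dmamap_t map, struct mbuf *m)
{
	int i;

	/* 1. Map the mbuf chain for DMA without sleeping. */
	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0)
		return (ENOBUFS);

	/* 2. Make sure the chain fits in the remaining descriptors. */
	if (map->dm_nsegs > sc->sc_tx_free) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}

	/* 3. Flush the payload to memory before the chip reads it. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* 4. One descriptor per DMA segment; the chip-specific fill is
	 *    omitted (addresses come from map->dm_segs[i].ds_addr,
	 *    lengths from ds_len). */
	for (i = 0; i < map->dm_nsegs; i++) {
		/* write map->dm_segs[i] into descriptor i */
	}

	sc->sc_tx_free -= map->dm_nsegs;
	return (0);
}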
Example 2
int
cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
{
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	const u_int h = RXDESC_PREV(i);
	struct cpsw_cpdma_bd bd;
	struct mbuf *m;
	int error = ENOBUFS;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		goto reuse;
	}

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		goto reuse;
	}

	/* We have a new buffer, prepare it for the ring. */

	if (rdp->rx_mb[i] != NULL)
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);

	m->m_len = m->m_pkthdr.len = MCLBYTES;

	rdp->rx_mb[i] = m;

	error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("can't load rx DMA map %d: %d\n", i, error);
	}

	bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
	    0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	error = 0;

reuse:
	/* (re-)setup the descriptor */
	bd.next = 0;
	bd.bufptr = rdp->rx_dm[i]->dm_segs[0].ds_addr;
	bd.bufoff = 0;
	bd.buflen = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
	bd.pktlen = 0;
	bd.flags = CPDMA_BD_OWNER;

	cpsw_set_rxdesc(sc, i, &bd);
	/* and link onto ring */
	cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));

	return error;
}
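
cpsw_new_rxbuf illustrates the receive-side counterpart: replace a filled cluster with a fresh one, and if anything fails keep the old buffer so the descriptor always points at valid memory. The sketch below is a reduced version of that idea with hypothetical names; unlike the cpsw code it explicitly re-loads the previous mbuf when loading the replacement fails.

/* Replace one Rx cluster; *mp holds the current mbuf for this slot.
 * Needs <sys/mbuf.h> and <machine/bus.h>. */
int
xx_new_rxbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **mp)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Drop the old mapping only once a replacement cluster exists. */
	bus_dmamap_unload(dmat, map);
	if (bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		/* Put the previous cluster back so the ring stays valid. */
		(void)bus_dmamap_load_mbuf(dmat, map, *mp,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		return (ENOBUFS);
	}
	bus_dmamap_sync(dmat, map, 0, map->dm_mapsize, BUS_DMASYNC_PREREAD);

	*mp = m;
	return (0);
}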
Example 3
int
cas_encap(struct cas_softc *sc, struct mbuf *mhead, u_int32_t *bixp)
{
	u_int64_t flags;
	u_int32_t cur, frag, i;
	bus_dmamap_t map;

	cur = frag = *bixp;
	map = sc->sc_txd[cur].sd_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
	    BUS_DMA_NOWAIT) != 0) {
		return (ENOBUFS);
	}

	if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) {
		bus_dmamap_unload(sc->sc_dmatag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < map->dm_nsegs; i++) {
		sc->sc_txdescs[frag].cd_addr =
		    CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
		flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
		    (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
		    ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
		sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
		    CAS_CDTXOFF(frag), sizeof(struct cas_desc),
		    BUS_DMASYNC_PREWRITE);
		cur = frag;
		if (++frag == CAS_NTXDESC)
			frag = 0;
	}

	sc->sc_tx_cnt += map->dm_nsegs;
	sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map;
	sc->sc_txd[cur].sd_map = map;
	sc->sc_txd[cur].sd_mbuf = mhead;

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);

	*bixp = frag;

	/* sync descriptors */

	return (0);
}
Example 4
struct mbuf *
tsec_alloc_mbuf(struct tsec_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_ac.ac_if, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}
Example 5
extern struct mbuf *
pdq_os_databuf_alloc(
    pdq_os_ctx_t *sc)
{
    struct mbuf *m;
    bus_dmamap_t map;

    MGETHDR(m, M_NOWAIT, MT_DATA);
    if (m == NULL) {
	printf("%s: can't alloc small buf\n", sc->sc_dev.dv_xname);
	return NULL;
    }
    MCLGET(m, M_NOWAIT);
    if ((m->m_flags & M_EXT) == 0) {
	printf("%s: can't alloc cluster\n", sc->sc_dev.dv_xname);
        m_free(m);
	return NULL;
    }
    m->m_pkthdr.len = m->m_len = PDQ_OS_DATABUF_SIZE;

    if (bus_dmamap_create(sc->sc_dmatag, PDQ_OS_DATABUF_SIZE,
			   1, PDQ_OS_DATABUF_SIZE, 0, BUS_DMA_NOWAIT, &map)) {
	printf("%s: can't create dmamap\n", sc->sc_dev.dv_xname);
	m_free(m);
	return NULL;
    }
    if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
    			     BUS_DMA_READ|BUS_DMA_NOWAIT)) {
	printf("%s: can't load dmamap\n", sc->sc_dev.dv_xname);
	bus_dmamap_destroy(sc->sc_dmatag, map);
	m_free(m);
	return NULL;
    }
    m->m_flags |= M_HASRXDMAMAP;
    M_SETCTX(m, map);
    return m;
}
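
The pdq allocator above pairs every buffer with its own DMA map and unwinds in reverse order when any step fails: destroy the map it just created, free the mbuf it just allocated. A hedged sketch of that ordering follows (all names are illustrative; one segment per cluster, as in the example; needs <sys/mbuf.h> and <machine/bus.h>).

struct mbuf *
xx_alloc_rxbuf(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{
	struct mbuf *m;
	bus_dmamap_t map;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0)
		goto fail_mbuf;
	m->m_pkthdr.len = m->m_len = MCLBYTES;

	/* One map per buffer, sized for a single cluster-sized segment. */
	if (bus_dmamap_create(dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &map) != 0)
		goto fail_mbuf;
	if (bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
		goto fail_map;

	*mapp = map;
	return (m);

fail_map:
	bus_dmamap_destroy(dmat, map);	/* undo bus_dmamap_create() */
fail_mbuf:
	m_freem(m);			/* undo MGETHDR()/MCLGET() */
	return (NULL);
}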
Example 6
/* Start packet transmission on the interface. */
void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf    *m0;
	bus_dmamap_t    dmamap;
	int             txstart;
	int             txsfree;
	int             newpkts = 0;
	int             error;

	/*
         * Do not start another transmission if one is already in
         * progress and more descriptors (Tx slots) are needed for the
         * next packet.
         */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* determine number of descriptors available */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;

	/*
         * Loop through the send queue, setting up transmit descriptors
         * until we drain the queue, or use up all available transmit
         * descriptors.
         */
	while (txsfree > 0) {
		int             seg;

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* get the transmit slot dma map */
		dmamap = sc->bce_cdata.bce_tx_map[sc->bce_txsnext];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. If the packet will not fit,
		 * it will be dropped. If short on resources, it will
		 * be tried again later.
		 */
		error = bus_dmamap_load_mbuf(sc->bce_dmatag, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			printf("%s: Tx packet consumes too many DMA segments, "
			    "dropping...\n", sc->bce_dev.dv_xname);
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			m_freem(m0);
			ifp->if_oerrors++;
			continue;
		} else if (error) {
			/* short on resources, come back later */
			printf("%s: unable to load Tx buffer, error = %d\n",
			    sc->bce_dev.dv_xname, error);
			break;
		}
		/* If not enough descriptors available, try again later */
		if (dmamap->dm_nsegs > txsfree) {
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->bce_dmatag, dmamap);
			break;
		}
		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */

		/* So take it off the queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/* save the pointer so it can be freed later */
		sc->bce_cdata.bce_tx_chain[sc->bce_txsnext] = m0;

		/* Sync the data DMA map. */
		bus_dmamap_sync(sc->bce_dmatag, dmamap, 0, dmamap->dm_mapsize,
				BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			u_int32_t ctrl;

			ctrl = dmamap->dm_segs[seg].ds_len & CTRL_BC_MASK;
			if (seg == 0)
				ctrl |= CTRL_SOF;
			if (seg == dmamap->dm_nsegs - 1)
				ctrl |= CTRL_EOF;
			if (sc->bce_txsnext == BCE_NTXDESC - 1)
				ctrl |= CTRL_EOT;
			ctrl |= CTRL_IOC;
			sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
			sc->bce_tx_ring[sc->bce_txsnext].addr =
			    htole32(dmamap->dm_segs[seg].ds_addr + 0x40000000);	/* MAGIC */
			if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
				sc->bce_txsnext = 0;
			else
				sc->bce_txsnext++;
			txsfree--;
		}
		/* sync descriptors being used */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
			  sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
			     sizeof(struct bce_dma_slot) * dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
			     sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif				/* NBPFILTER > 0 */
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
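
bce_start separates the three load outcomes that every start routine has to handle: EFBIG means the chain can never be mapped and the packet is dropped, any other error is treated as a transient shortage and retried later, and a successful load that needs more descriptors than are free is unloaded and deferred. A small helper capturing that triage might look like the sketch below; the names, and the assumption that the packet has already been dequeued, are illustrative, and IFF_OACTIVE / if_oerrors follow the older ifnet interface used throughout these examples. Needs <sys/mbuf.h>, <net/if.h>, <machine/bus.h>.

/*
 * Classify the result of loading a Tx mbuf. Returns 1 if the caller may
 * continue with this (already dequeued) packet, 0 if it was dropped, and
 * -1 if the caller should stop and retry later.
 */
static int
xx_tx_load(struct ifnet *ifp, bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, int tx_free)
{
	int error;

	error = bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: this packet can never be sent. */
		m_freem(m);
		ifp->if_oerrors++;
		return (0);
	}
	if (error != 0)
		return (-1);		/* transient shortage, retry later */

	if (map->dm_nsegs > tx_free) {
		/* Mapped, but the ring is full: back off completely. */
		ifp->if_flags |= IFF_OACTIVE;
		bus_dmamap_unload(dmat, map);
		return (-1);
	}
	return (1);
}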
Example 7
/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
	 * until we drain the queues, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		vlan++;
		if (vlan == SW_DEVS)
			vlan = 0;
		i = vlan;
		for (;;) {
			ifp = sc->sc_ifnet[i];
			if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) 
			    == IFF_DRV_RUNNING) {
				/* Grab a packet off the queue. */
				IF_DEQUEUE(&ifp->if_snd, m0);
				if (m0 != NULL)
					break;
			}
			i++;
			if (i == SW_DEVS)
				i = 0;
			if (i == vlan)
				return;
		}
		vlan = i;
		m = NULL;

		/* Get a spare descriptor. */
		if (sc->sc_txfree == 0) {
			/* No more slots left; notify upper layer. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		nexttx = sc->sc_txnext;
		desc = &sc->sc_txldescs[nexttx];
		ds = &sc->sc_txlsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m0,
		    admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m == NULL) {
				device_printf(sc->sc_dev, 
				    "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_NOWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					device_printf(sc->sc_dev, 
					    "unable to allocate Tx cluster\n");
					m_freem(m);
					break;
				}
			}
			m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (m->m_pkthdr.len < ETHER_MIN_LEN) {
				if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
					panic("admsw_start: M_TRAILINGSPACE\n");
				memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_bufs_dmat, 
			    dmamap, m, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT);
			if (error) {
				device_printf(sc->sc_dev, 
				    "unable to load Tx buffer, error = %d\n", 
				    error);
				break;
			}
		}

		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_bufs_dmat, dmamap, BUS_DMASYNC_PREWRITE);

		if (ds->ds_nsegs != 1 && ds->ds_nsegs != 2)
			panic("admsw_start: nsegs == %d\n", ds->ds_nsegs);
		desc->data = ds->ds_addr[0];
		desc->len = len = ds->ds_len[0];
		if (ds->ds_nsegs > 1) {
			len += ds->ds_len[1];
			desc->cntl = ds->ds_addr[1] | ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & CSUM_IP)
			desc->status |= ADM5120_DMA_CSUM;
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n",nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

		/* Pass the packet to any BPF listeners. */
		BPF_MTAP(ifp, m0);

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_timer = 5;
	}
Example 8
File: if_sq.c Project: MarginC/kame
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr=
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
		    sc->sc_txdesc[lasttx].hdd_ctl += (ETHER_MIN_LEN - totlen);
		    totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
						       HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx),  1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
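
Several of the start routines above and below (sq, sonic, ae, ste, epic) share the same fallback: when the original chain will not load, copy the whole packet into one fresh mbuf, using a cluster when it exceeds MHLEN, and load that instead. The common shape of that fallback, pulled out into a hypothetical helper (as in those drivers, the packet is assumed to fit in one cluster):

/*
 * Copy an mbuf chain into a single contiguous mbuf (cluster if needed).
 * Returns the new mbuf or NULL; the original chain is left untouched.
 */
static struct mbuf *
xx_tx_copy(struct mbuf *m0)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	if (m0->m_pkthdr.len > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (NULL);
		}
	}
	m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
	m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
	return (m);
}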
Example 9
void
mec_start(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t txdaddr;
	int error, firsttx, nexttx, opending;
	int len, bufoff, buflen, unaligned, txdlen;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the first transmit descriptor.
	 */
	opending = sc->sc_txpending;
	firsttx = MEC_NEXTTX(sc->sc_txlast);

	DPRINTF(MEC_DEBUG_START,
	    ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));

	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txpending == MEC_NTXDESC) {
			break;
		}

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = MEC_NEXTTX(sc->sc_txlast);
		txd = &sc->sc_txdesc[nexttx];
		txs = &sc->sc_txsoft[nexttx];

		buflen = 0;
		bufoff = 0;
		txdaddr = 0; /* XXX gcc */
		txdlen = 0; /* XXX gcc */

		len = m0->m_pkthdr.len;

		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: len = %d, nexttx = %d\n", len, nexttx));

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (len < ETHER_PAD_LEN) {
			/*
			 * I don't know if MEC chip does auto padding,
			 * so if the packet is small enough,
			 * just copy it to the buffer in txdesc.
			 * Maybe this is the simple way.
			 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));

			bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
			m_copydata(m0, 0, m0->m_pkthdr.len,
			    txd->txd_buf + bufoff);
			memset(txd->txd_buf + bufoff + len, 0,
			    ETHER_PAD_LEN - len);
			len = buflen = ETHER_PAD_LEN;

			txs->txs_flags = MEC_TXS_TXDBUF | buflen;
		} else {
			/*
			 * If the packet won't fit the buffer in txdesc,
			 * we have to use concatenate pointer to handle it.
			 * While MEC can handle up to three segments to
			 * concatenate, MEC requires that both the second and
			 * third segments have to be 8 byte aligned.
			 * Since it's unlikely for mbuf clusters, we use
			 * only the first concatenate pointer. If the packet
			 * doesn't fit in one DMA segment, allocate new mbuf
			 * and copy the packet to it.
			 *
			 * Besides, if the start address of the first segment
			 * is not 8 byte aligned, that part has to be copied
			 * to the txdesc buffer. (XXX see below comments)
	                 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));

			dmamap = txs->txs_dmamap;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
			    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				struct mbuf *m;

				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: re-allocating mbuf\n"));
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					printf("%s: unable to allocate "
					    "TX mbuf\n", sc->sc_dev.dv_xname);
					break;
				}
				if (len > (MHLEN - ETHER_ALIGN)) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						printf("%s: unable to allocate "
						    "TX cluster\n",
						    sc->sc_dev.dv_xname);
						m_freem(m);
						break;
					}
				}
				/*
				 * Each packet has the Ethernet header, so
				 * in many cases the header isn't 4-byte aligned
				 * and data after the header is 4-byte aligned.
				 * Thus adding 2-byte offset before copying to
				 * new mbuf avoids unaligned copy and this may
				 * improve performance.
				 * As noted above, unaligned part has to be
				 * copied to txdesc buffer so this may cause
				 * extra copy ops, but for now MEC always
				 * requires some data in txdesc buffer,
				 * so we always have to copy some data anyway.
				 */
				m->m_data += ETHER_ALIGN;
				m_copydata(m0, 0, len, mtod(m, caddr_t));
				m->m_pkthdr.len = m->m_len = len;
				m_freem(m0);
				m0 = m;
				error = bus_dmamap_load_mbuf(sc->sc_dmat,
				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
				if (error) {
					printf("%s: unable to load TX buffer, "
					    "error = %d\n",
					    sc->sc_dev.dv_xname, error);
					m_freem(m);
					break;
				}
			}

			/* Handle unaligned part. */
			txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr);
			txs->txs_flags = MEC_TXS_TXDPTR1;
			unaligned =
			    dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1);
			DPRINTF(MEC_DEBUG_START,
			    ("mec_start: ds_addr = 0x%x, unaligned = %d\n",
			    (u_int)dmamap->dm_segs[0].ds_addr, unaligned));
			if (unaligned != 0) {
				buflen = MEC_TXD_ALIGN - unaligned;
				bufoff = MEC_TXD_BUFSTART(buflen);
				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: unaligned, "
				    "buflen = %d, bufoff = %d\n",
				    buflen, bufoff));
				memcpy(txd->txd_buf + bufoff,
				    mtod(m0, caddr_t), buflen);
				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
			}
#if 1
			else {
				/*
				 * XXX needs hardware info XXX
				 * It seems MEC always requires some data
				 * in txd_buf[] even if buffer is
				 * 8-byte aligned otherwise DMA abort error
				 * occurs later...
				 */
				buflen = MEC_TXD_ALIGN;
				bufoff = MEC_TXD_BUFSTART(buflen);
				memcpy(txd->txd_buf + bufoff,
				    mtod(m0, caddr_t), buflen);
				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: aligned, "
				    "buflen = %d, bufoff = %d\n",
				    buflen, bufoff));
				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
				txdaddr += MEC_TXD_ALIGN;
			}
#endif
			txdlen  = len - buflen;
			DPRINTF(MEC_DEBUG_START,
			    ("mec_start: txdaddr = 0x%llx, txdlen = %d\n",
			    txdaddr, txdlen));

			/*
			 * Sync the DMA map for TX mbuf.
			 *
			 * XXX unaligned part doesn't have to be sync'ed,
			 *     but it's harmless...
			 */
			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
			    dmamap->dm_mapsize,	BUS_DMASYNC_PREWRITE);
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif

		/*
		 * Setup the transmit descriptor.
		 */

		/* TXINT bit will be set later on the last packet. */
		txd->txd_cmd = (len - 1);
		/* But also set TXINT bit on a half of TXDESC. */
		if (sc->sc_txpending == (MEC_NTXDESC / 2))
			txd->txd_cmd |= MEC_TXCMD_TXINT;

		if (txs->txs_flags & MEC_TXS_TXDBUF)
			txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen);
		if (txs->txs_flags & MEC_TXS_TXDPTR1) {
			txd->txd_cmd |= MEC_TXCMD_PTR1;
			txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr;
			/*
			 * Store a pointer to the packet so we can
			 * free it later.
			 */
			txs->txs_mbuf = m0;
		} else {
			txd->txd_ptr[0] = 0;
			/*
			 * In this case all data are copied to buffer in txdesc,
			 * we can free TX mbuf here.
			 */
			m_freem(m0);
		}

		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: txd_cmd = 0x%llx, txd_ptr = 0x%llx\n",
		    txd->txd_cmd, txd->txd_ptr[0]));
		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n",
		    len, len, buflen, buflen));

		/* Sync TX descriptor. */
		MEC_TXDESCSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the TX pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;
	}

	if (sc->sc_txpending == MEC_NTXDESC) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * Cause a TX interrupt to happen on the last packet
		 * we enqueued.
		 */
		sc->sc_txdesc[sc->sc_txlast].txd_cmd |= MEC_TXCMD_TXINT;
		MEC_TXCMDSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start TX. */
		bus_space_write_8(st, sh, MEC_TX_RING_PTR,
		    MEC_NEXTTX(sc->sc_txlast));

		/*
		 * If the transmitter was idle,
		 * reset the txdirty pointer and re-enable TX interrupt.
		 */
		if (opending == 0) {
			sc->sc_txdirty = firsttx;
			bus_space_write_8(st, sh, MEC_TX_ALIAS,
			    MEC_TX_ALIAS_INT_ENABLE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
Example 10
/*
 * sonic_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up. */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
Example 11
/*
 * ae_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = -1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags, ifp->if_flags));


	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    device_xname(sc->sc_dev), ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		      BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
				ADCTL_SIZE1_SHIFT) |
				(nexttx == (AE_NTXDESC - 1) ?
				    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       ad_status:   0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf("       ad_ctl:      0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf("       ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf("       ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}
Example 12
/*
 * ste_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
ste_start(struct ifnet *ifp)
{
	struct ste_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ste_descsoft *ds;
	struct ste_tfd *tfd;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, seg, totlen;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of pending transmissions
	 * and the current last descriptor in the list.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < STE_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = STE_NEXTTX(sc->sc_txlast);
		tfd = &sc->sc_txdescs[nexttx];
		ds = &sc->sc_txsoft[nexttx];

		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(&sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", device_xname(&sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(&sc->sc_dev), error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the fragment list. */
		for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) {
			tfd->tfd_frags[seg].frag_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			tfd->tfd_frags[seg].frag_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
			totlen += dmamap->dm_segs[seg].ds_len;
		}
		tfd->tfd_frags[seg - 1].frag_len |= htole32(FRAG_LAST);

		/* Initialize the descriptor. */
		tfd->tfd_next = htole32(STE_CDTXADDR(sc, nexttx));
		tfd->tfd_control = htole32(TFD_FrameId(nexttx) | (totlen & 3));

		/* Sync the descriptor. */
		STE_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		ds->ds_mbuf = m0;

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}
Example 13
/*
 * Start output on interface.
 */
void
zestart(struct ifnet *ifp)
{
	struct ze_softc *sc = ifp->if_softc;
	struct ze_cdata *zc = sc->sc_zedata;
	paddr_t	buffer;
	struct mbuf *m;
	int nexttx, starttx;
	int len, i, totlen, error;
	int old_inq = sc->sc_inq;
	uint16_t orword, tdr;
	bus_dmamap_t map;

	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			ze_setup(sc);
			continue;
		}
		nexttx = sc->sc_nexttx;
		IFQ_POLL(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		map = sc->sc_xmtmap[nexttx];
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "zestart: load_mbuf failed: %d", error);
			goto out;
		}

		if (map->dm_nsegs >= TXDESCS)
			panic("zestart"); /* XXX */

		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
			bus_dmamap_unload(sc->sc_dmat, map);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		orword = ZE_TDES1_FS;
		starttx = nexttx;
		for (i = 0; i < map->dm_nsegs; i++) {
			buffer = map->dm_segs[i].ds_addr;
			len = map->dm_segs[i].ds_len;

			KASSERT(len > 0);

			totlen += len;
			/* Word alignment calc */
			if (totlen == m->m_pkthdr.len) {
				sc->sc_txcnt += map->dm_nsegs;
				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
					orword |= ZE_TDES1_IC;
					sc->sc_txcnt = 0;
				}
				orword |= ZE_TDES1_LS;
				sc->sc_txmbuf[nexttx] = m;
			}
			zc->zc_xmit[nexttx].ze_bufsize = len;
			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
			zc->zc_xmit[nexttx].ze_tdes1 = orword;
			zc->zc_xmit[nexttx].ze_tdr = tdr;

			if (++nexttx == TXDESCS)
				nexttx = 0;
			orword = 0;
			tdr = ZE_TDR_OW;
		}

		sc->sc_inq += map->dm_nsegs;

		IFQ_DEQUEUE(&ifp->if_snd, m);
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("zestart: len fault");
#endif
		/*
		 * Turn ownership of the packet over to the device.
		 */
		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);
		sc->sc_nexttx = nexttx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (old_inq < sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
}
Example 14
void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	struct mbuf *m;
	bus_dmamap_t dm;
	u_int eopi = ~0;
	u_int seg;
	u_int txfree;
	int txstart = -1;
	int error;
	bool pad;
	u_int mlen;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
	    IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	if (sc->sc_txnext >= sc->sc_txhead)
		txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
	else
		txfree = sc->sc_txhead - sc->sc_txnext - 1;

	for (;;) {
		if (txfree <= CPSW_TXFRAGS) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);

		dm = rdp->tx_dm[sc->sc_txnext];
		error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			break;

		case EFBIG: /* mbuf chain is too fragmented */
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_bdt, dm, m,
			    BUS_DMA_NOWAIT) == 0)
				break;

			/* FALLTHROUGH */
		default:
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		mlen = dm->dm_mapsize;
		pad = mlen < CPSW_PAD_LEN;

		KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
		rdp->tx_mb[sc->sc_txnext] = m;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (txstart == -1)
			txstart = sc->sc_txnext;
		eopi = sc->sc_txnext;
		for (seg = 0; seg < dm->dm_nsegs; seg++) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = dm->dm_segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = dm->dm_segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = 0;

			if (seg == 0) {
				bd.flags = CPDMA_BD_OWNER | CPDMA_BD_SOP;
				bd.pktlen = MAX(mlen, CPSW_PAD_LEN);
			}

			if (seg == dm->dm_nsegs - 1 && !pad)
				bd.flags |= CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
		if (pad) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = sc->sc_txpad_pa;
			bd.bufoff = 0;
			bd.buflen = CPSW_PAD_LEN - mlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
	}

	if (txstart >= 0) {
		ifp->if_timer = 5;
		/* terminate the new chain */
		KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
		cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
		
		/* link the new chain on */
		cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
		    cpsw_txdesc_paddr(sc, txstart));
		if (sc->sc_txeoq) {
			/* kick the dma engine */
			sc->sc_txeoq = false;
			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, txstart));
		}
	}
}
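
cpsw_start handles EFBIG without copying: it asks m_defrag(9) to compact the chain in place and retries the load once. A sketch of that retry as a helper follows, using the OpenBSD m_defrag() seen in the example (it works in place and returns 0 on success); the helper name is hypothetical.

/*
 * Load an mbuf chain for transmit, defragmenting once on EFBIG.
 * Returns 0 with the map loaded, or an errno; on failure the caller
 * still owns (and should free) the mbuf.
 */
static int
xx_load_or_defrag(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT);
	if (error != EFBIG)
		return (error);

	/* Chain too fragmented for the map: compact it and retry once. */
	if (m_defrag(m, M_DONTWAIT) != 0)
		return (EFBIG);
	return (bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT));
}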
Example 15
/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 */
void
epic_start(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;
	u_int len;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == EPIC_NFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error)
				break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the fraglist. */
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}
		len = m0->m_pkthdr.len;
		if (len < ETHER_PAD_LEN) {
			fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
			fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len;
			len = ETHER_PAD_LEN;
			seg++;
		}
		fr->ef_nfrags = seg;

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = TXSTAT_TXLENGTH(len);
		else
			txd->et_txstatus =
			    TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
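
Like sonic_start earlier, epic_start pads undersized frames without touching the mbuf chain: it appends one extra fragment that points at a pre-mapped buffer of zeroes (sc_nulldma), so the chip transmits at least ETHER_PAD_LEN bytes. The sketch below isolates that step; struct xx_frag stands in for the chip-specific fragment descriptor and nulldma for the pre-mapped pad buffer, both hypothetical. Needs <machine/bus.h> and <net/if_ether.h> for ETHER_PAD_LEN.

/* Hypothetical fragment descriptor standing in for the chip's own. */
struct xx_frag {
	bus_addr_t	fr_addr;
	bus_size_t	fr_len;
};

/*
 * Fill frags[] from a loaded Tx map and, for short packets, append one
 * fragment of zeroes to reach the Ethernet minimum. Returns the number
 * of fragments used; frags[] must have room for dm_nsegs + 1 entries.
 */
static int
xx_fill_frags(struct xx_frag *frags, bus_dmamap_t map, int pktlen,
    bus_addr_t nulldma)
{
	int seg;

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		frags[seg].fr_addr = map->dm_segs[seg].ds_addr;
		frags[seg].fr_len = map->dm_segs[seg].ds_len;
	}
	if (pktlen < ETHER_PAD_LEN) {
		frags[seg].fr_addr = nulldma;
		frags[seg].fr_len = ETHER_PAD_LEN - pktlen;
		seg++;
	}
	return (seg);
}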
Example 16
static void
pdq_ifstart_locked(struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    struct mbuf *m;
    int tx = 0;

    PDQ_LOCK_ASSERT(sc);
    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
	return;

    if (sc->timer == 0)
	sc->timer = PDQ_OS_TX_TIMEOUT;

    if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
	PDQ_IFNET(sc)->if_drv_flags |= IFF_DRV_OACTIVE;
	return;
    }
    sc->sc_flags |= PDQIF_DOWNCALL;
    for (;; tx = 1) {
	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
	    break;
#if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX)
	if ((m->m_flags & M_HASTXDMAMAP) == 0) {
	    bus_dmamap_t map;
	    if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
		m->m_data[0] = PDQ_FDDI_PH0;
		m->m_data[1] = PDQ_FDDI_PH1;
		m->m_data[2] = PDQ_FDDI_PH2;
	    }
	    if (!bus_dmamap_create(sc->sc_dmatag, m->m_pkthdr.len, 255,
				   m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map)) {
		if (!bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
					  BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
		    bus_dmamap_sync(sc->sc_dmatag, map, 0, m->m_pkthdr.len,
				    BUS_DMASYNC_PREWRITE);
		    M_SETCTX(m, map);
		    m->m_flags |= M_HASTXDMAMAP;
		}
	    }
	    if ((m->m_flags & M_HASTXDMAMAP) == 0)
		break;
	}
#else
	if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
	    m->m_data[0] = PDQ_FDDI_PH0;
	    m->m_data[1] = PDQ_FDDI_PH1;
	    m->m_data[2] = PDQ_FDDI_PH2;
	}
#endif

	if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE)
	    break;
    }
    if (m != NULL) {
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	IF_PREPEND(&ifp->if_snd, m);
    }
    if (tx)
	PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
    sc->sc_flags &= ~PDQIF_DOWNCALL;
}
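
pdq_ifstart_locked takes yet another approach to map management: it creates a DMA map per packet on demand and stashes it in the mbuf with M_SETCTX, so the transmit-complete path can retrieve it with M_GETCTX, unload it and destroy it. A compressed sketch of that idea (names are illustrative; M_SETCTX/M_GETCTX are the NetBSD mbuf context macros used in the example):

/* Map a Tx mbuf with a map created on the fly; the map travels with the
 * mbuf via M_SETCTX. Needs <sys/mbuf.h> and <machine/bus.h>. */
static int
xx_tx_map_on_demand(bus_dma_tag_t dmat, struct mbuf *m, int maxsegs)
{
	bus_dmamap_t map;

	if (bus_dmamap_create(dmat, m->m_pkthdr.len, maxsegs,
	    m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map) != 0)
		return (ENOMEM);
	if (bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
		bus_dmamap_destroy(dmat, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(dmat, map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	/* Completion path: M_GETCTX(m, bus_dmamap_t), unload, destroy. */
	M_SETCTX(m, map);
	return (0);
}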
Example 17
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	uint32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example). We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
Example 18
/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	struct iee_tbd *tbd;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueuing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBDs than we support.
			 * Copy the packet into a mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    device_xname(sc->sc_dev));
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			tbd = SC_TBD(sc, sc->sc_next_tbd + n);
			tbd->tbd_tb_addr =
			    IEE_SWAPA32(sc->sc_tx_map[t]->dm_segs[n].ds_addr);
			tbd->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			tbd->tbd_link_addr =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_tbd_off +
			    sc->sc_tbd_sz * (sc->sc_next_tbd + n + 1)));
		}
		SC_TBD(sc, sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
		    sc->sc_tbd_off + sc->sc_next_tbd * sc->sc_tbd_sz,
		    sc->sc_tbd_sz * sc->sc_tx_map[t]->dm_nsegs,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
		/* Pass packet to bpf if someone listens. */
		bpf_mtap(ifp, sc->sc_tx_mbuf[t]);
	}
Example 19
/*
 * How frame reception is done:
 * Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
 * Each RBD points to the data area of an mbuf cluster. The RFDs are linked
 * together in a circular list. sc->sc_rx_done is the count of RFDs in the
 * list already processed / the number of the RFD that has to be checked for
 * a new frame first at the next RX interrupt. Upon successful reception of
 * a frame the mbuf cluster is handed to upper protocol layers, a new mbuf
 * cluster is allocated and the RFD / RBD are reinitialized accordingly.
 *
 * When an RFD list overrun occurs, the whole RFD and RBD lists are
 * reinitialized and frame reception is started again.
 */
int
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct iee_rfd *rfd;
	struct iee_rbd *rbd;
	bus_dmamap_t rx_map;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	int scb_status;
	int scb_cmd;
	int n, col;
	uint16_t status, count, cmd;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
		return 1;
	}
	IEE_SCBSYNC(sc, BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB(sc)->scb_status;
	scb_cmd = SC_SCB(sc)->scb_cmd;
	for (;;) {
		rfd = SC_RFD(sc, sc->sc_rx_done);
		IEE_RFDSYNC(sc, sc->sc_rx_done,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		status = rfd->rfd_status;
		if ((status & IEE_RFD_C) == 0) {
			IEE_RFDSYNC(sc, sc->sc_rx_done, BUS_DMASYNC_PREREAD);
			break;
		}
		rfd->rfd_status = 0;
		IEE_RFDSYNC(sc, sc->sc_rx_done,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* At least one packet was received. */
		rx_map = sc->sc_rx_map[sc->sc_rx_done];
		rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
		IEE_RBDSYNC(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SC_RBD(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
		    &= ~IEE_RBD_EL;
		IEE_RBDSYNC(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		rbd = SC_RBD(sc, sc->sc_rx_done);
		IEE_RBDSYNC(sc, sc->sc_rx_done,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		count = rbd->rbd_count;
		if ((status & IEE_RFD_OK) == 0
		    || (count & IEE_RBD_EOF) == 0
		    || (count & IEE_RBD_F) == 0) {
			/* Receive error, skip frame and reuse buffer. */
			rbd->rbd_count = 0;
			rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
			IEE_RBDSYNC(sc, sc->sc_rx_done,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			printf("%s: iee_intr: receive error %d, rfd_status="
			    "0x%.4x, rfd_count=0x%.4x\n",
			    device_xname(sc->sc_dev),
			    ++sc->sc_rx_err, status, count);
			sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
			continue;
		}
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
		    count & IEE_RBD_COUNT;
		rx_mbuf->m_pkthdr.rcvif = ifp;
		MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
		if (new_mbuf == NULL) {
			printf("%s: iee_intr: can't allocate mbuf\n",
			    device_xname(sc->sc_dev));
			break;
		}
		MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(new_mbuf, M_DONTWAIT);
		if ((new_mbuf->m_flags & M_EXT) == 0) {
			printf("%s: iee_intr: can't alloc mbuf cluster\n",
			    device_xname(sc->sc_dev));
			m_freem(new_mbuf);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, rx_map);
		new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES - 2;
		new_mbuf->m_data += 2;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_map,
		    new_mbuf, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
			panic("%s: iee_intr: can't load RX DMA map\n",
			    device_xname(sc->sc_dev));
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    rx_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		bpf_mtap(ifp, rx_mbuf);
		(*ifp->if_input)(ifp, rx_mbuf);
		ifp->if_ipackets++;
		sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
		rbd->rbd_count = 0;
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		rbd->rbd_rb_addr = IEE_SWAPA32(rx_map->dm_segs[0].ds_addr);
		IEE_RBDSYNC(sc, sc->sc_rx_done,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
	}
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			rfd = SC_RFD(sc, n);
			rbd = SC_RBD(sc, n);
			rfd->rfd_cmd = IEE_RFD_SF;
			rfd->rfd_link_addr =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off
			    + sc->sc_rfd_sz * ((n + 1) % IEE_NRFD)));
			rbd->rbd_next_rbd =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off
			    + sc->sc_rbd_sz * ((n + 1) % IEE_NRFD)));
			rbd->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			rbd->rbd_rb_addr =
			    IEE_SWAPA32(sc->sc_rx_map[n]->dm_segs[0].ds_addr);
		}
		SC_RFD(sc, 0)->rfd_rbd_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off));
		sc->sc_rx_done = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, sc->sc_rfd_off,
		    sc->sc_rfd_sz * IEE_NRFD + sc->sc_rbd_sz * IEE_NRFD,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    device_xname(sc->sc_dev));
	}

	if (sc->sc_next_cb != 0) {
		IEE_CBSYNC(sc, sc->sc_next_cb - 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		status = SC_CB(sc, sc->sc_next_cb - 1)->cb_status;
		IEE_CBSYNC(sc, sc->sc_next_cb - 1,
		    BUS_DMASYNC_PREREAD);
		if ((status & IEE_CB_C) != 0) {
			/* CMD list finished */
			ifp->if_timer = 0;
			if (sc->sc_next_tbd != 0) {
				/* A TX CMD list finished, cleanup */
				for (n = 0 ; n < sc->sc_next_cb ; n++) {
					m_freem(sc->sc_tx_mbuf[n]);
					sc->sc_tx_mbuf[n] = NULL;
					bus_dmamap_unload(sc->sc_dmat,
					    sc->sc_tx_map[n]);
					IEE_CBSYNC(sc, n,
				    	    BUS_DMASYNC_POSTREAD|
					    BUS_DMASYNC_POSTWRITE);
					status = SC_CB(sc, n)->cb_status;
					IEE_CBSYNC(sc, n,
				    	    BUS_DMASYNC_PREREAD);
					if ((status & IEE_CB_COL) != 0 &&
					    (status & IEE_CB_MAXCOL) == 0)
						col = 16;
					else
						col = status
						    & IEE_CB_MAXCOL;
					sc->sc_tx_col += col;
					if ((status & IEE_CB_OK) != 0) {
						ifp->if_opackets++;
						ifp->if_collisions += col;
					}
				}
				sc->sc_next_tbd = 0;
				ifp->if_flags &= ~IFF_OACTIVE;
			}
			for (n = 0 ; n < sc->sc_next_cb; n++) {
				/*
				 * Check if a CMD failed, but ignore TX errors.
				 */
				IEE_CBSYNC(sc, n,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				cmd = SC_CB(sc, n)->cb_cmd;
				status = SC_CB(sc, n)->cb_status;
				IEE_CBSYNC(sc, n, BUS_DMASYNC_PREREAD);
				if ((cmd & IEE_CB_CMD) != IEE_CB_CMD_TR &&
				    (status & IEE_CB_OK) == 0)
					printf("%s: iee_intr: scb_status=0x%x "
					    "scb_cmd=0x%x failed command %d: "
					    "cb_status[%d]=0x%.4x "
					    "cb_cmd[%d]=0x%.4x\n",
					    device_xname(sc->sc_dev),
					    scb_status, scb_cmd,
					    ++sc->sc_cmd_err,
					    n, status, n, cmd);
			}
			sc->sc_next_cb = 0;
			if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
				iee_cb_setup(sc, IEE_CB_CMD_MCS |
				    IEE_CB_S | IEE_CB_EL | IEE_CB_I);
				(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
			} else
				/* Try to get deferred packets going. */
				iee_start(ifp);
		}
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP32(SC_SCB(sc)->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", device_xname(sc->sc_dev),
		    sc->sc_crc_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP32(SC_SCB(sc)->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n", device_xname(sc->sc_dev),
		    sc->sc_align_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP32(SC_SCB(sc)->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_resource_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP32(SC_SCB(sc)->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_overrun_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP32(SC_SCB(sc)->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_rcvcdt_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP32(SC_SCB(sc)->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_short_fr_err);
	}
	IEE_SCBSYNC(sc, BUS_DMASYNC_PREREAD);
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return 1;
}
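
The comment before iee_intr() describes the receive strategy these drivers share: only after a replacement cluster has been allocated and mapped is the current one handed to the upper layers, so the ring never loses a slot. A minimal, driver-independent sketch of that swap follows; the rxslot structure and the rx_refill() name are illustrative assumptions.

/*
 * Sketch: swap in a freshly mapped cluster and return the old one for
 * the network stack.  On any failure the old buffer stays in place (or,
 * after the unload, must be reloaded by the caller, as nfe_rxeof() does
 * below).  Illustrative only.
 */
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/bus.h>		/* <machine/bus.h> on OpenBSD */

struct rxslot {
	bus_dmamap_t	 map;	/* loaded with the current cluster */
	struct mbuf	*m;	/* the current cluster */
};

static struct mbuf *
rx_refill(bus_dma_tag_t dmat, struct rxslot *rs)
{
	struct mbuf *m, *old;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;			/* keep the old buffer */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	m->m_pkthdr.len = m->m_len = MCLBYTES;

	bus_dmamap_sync(dmat, rs->map, 0, rs->map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmat, rs->map);
	if (bus_dmamap_load_mbuf(dmat, rs->map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return NULL;	/* caller must reload rs->m or bail out */
	}
	bus_dmamap_sync(dmat, rs->map, 0, rs->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	old = rs->m;			/* now owned by the network stack */
	rs->m = m;
	return old;
}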
Example #20
static void
rtk_start(struct ifnet *ifp)
{
	struct rtk_softc *sc;
	struct rtk_tx_desc *txd;
	struct mbuf *m_head, *m_new;
	int error, len;

	sc = ifp->if_softc;

	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		m_new = NULL;

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note,
		 * the packet must also be aligned.
		 * If the packet is too small, copy it too, so we're sure
		 * to have enough room for the pad buffer.
		 */
		if ((mtod(m_head, uintptr_t) & 3) != 0 ||
		    m_head->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
			m_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m_new, M_DONTWAIT, MT_DATA);
			if (m_new == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m_head->m_pkthdr.len > MHLEN) {
				MCLGET(m_new, M_DONTWAIT);
				if ((m_new->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m_new);
					break;
				}
			}
			m_copydata(m_head, 0, m_head->m_pkthdr.len,
			    mtod(m_new, void *));
			m_new->m_pkthdr.len = m_new->m_len =
			    m_head->m_pkthdr.len;
			if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(
				    mtod(m_new, char *) + m_head->m_pkthdr.len,
				    0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
				m_new->m_pkthdr.len = m_new->m_len =
				    ETHER_PAD_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_dmat,
			    txd->txd_dmamap, m_new,
			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n",
				    device_xname(sc->sc_dev), error);
				break;
			}
		}
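
rtk_start() also copies frames shorter than ETHER_PAD_LEN, because the zero padding has to live in contiguous storage the chip can DMA from. A small sketch of just that padding step, assuming (as the copy path above guarantees) that the packet already sits in one contiguous mbuf; the pad_runt() name and the NetBSD header path are assumptions.

#include <sys/param.h>
#include <sys/systm.h>		/* memset() in the kernel */
#include <sys/mbuf.h>
#include <net/if_ether.h>	/* ETHER_PAD_LEN on NetBSD */

/* Zero-pad a contiguous TX mbuf up to the minimum Ethernet frame size. */
static void
pad_runt(struct mbuf *m)
{
	int len = m->m_pkthdr.len;

	if (len >= ETHER_PAD_LEN)
		return;
	/* Safe only because the copy path guarantees contiguous storage. */
	memset(mtod(m, char *) + len, 0, ETHER_PAD_LEN - len);
	m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
}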
Example #21
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		mnew = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
			    data->m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail.. */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
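
When nfe_rxeof() rewrites the descriptor it splits the bus address of the new cluster into two little-endian 32-bit words for the 40-bit addressing mode. A stripped-down sketch of that split is below; the desc64 layout and desc_set_addr() are stand-ins for illustration, not the real struct nfe_desc64.

#include <sys/param.h>
#include <sys/endian.h>		/* htole32() */
#include <sys/bus.h>		/* bus_addr_t; <machine/bus.h> on OpenBSD */

struct desc64 {
	uint32_t addr_hi;	/* upper 32 bits of the buffer address */
	uint32_t addr_lo;	/* lower 32 bits of the buffer address */
};

static void
desc_set_addr(struct desc64 *d, bus_addr_t pa)
{
#if defined(__LP64__)
	d->addr_hi = htole32((uint64_t)pa >> 32);
#else
	d->addr_hi = 0;			/* bus addresses fit in 32 bits */
#endif
	d->addr_lo = htole32(pa & 0xffffffff);
}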
Example #22
int
ubsec_process(struct cryptop *crp)
{
	struct ubsec_q *q = NULL;
	int card, err = 0, i, j, s, nicealign;
	struct ubsec_softc *sc;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend;
	int16_t coffset;
	struct ubsec_session *ses, key;
	struct ubsec_dma *dmap = NULL;
	u_int16_t flags = 0;
	int ivlen = 0, keylen = 0;

	if (crp == NULL || crp->crp_callback == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}
	card = UBSEC_CARD(crp->crp_sid);
	if (card >= ubsec_cd.cd_ndevs || ubsec_cd.cd_devs[card] == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}

	sc = ubsec_cd.cd_devs[card];

	s = splnet();

	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		ubsecstats.hst_queuefull++;
		splx(s);
		err = ENOMEM;
		goto errout2;
	}

	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
	splx(s);

	dmap = q->q_dma; /* Save dma pointer */
	bzero(q, sizeof(struct ubsec_q));
	bzero(&key, sizeof(key));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		q->q_src_m = (struct mbuf *)crp->crp_buf;
		q->q_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the ubsec as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			if ((sc->sc_flags & UBS_FLAGS_AES) == 0) {
				err = EINVAL;
				goto errout;
			}
			flags |= htole16(UBS_PKTCTX_ENC_AES);
			switch (enccrd->crd_klen) {
			case 128:
			case 192:
			case 256:
				keylen = enccrd->crd_klen / 8;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
			ivlen = 16;
		} else {
			flags |= htole16(UBS_PKTCTX_ENC_3DES);
			ivlen = 8;
			keylen = 24;
		}

		encoffset = enccrd->crd_skip;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, key.ses_iv, ivlen);
			else
				arc4random_buf(key.ses_iv, ivlen);

			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					err = m_copyback(q->q_src_m,
					    enccrd->crd_inject,
					    ivlen, key.ses_iv, M_NOWAIT);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(q->q_src_io,
					    enccrd->crd_inject,
					    ivlen, key.ses_iv);
				if (err)
					goto errout;
			}
		} else {
			flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, key.ses_iv, ivlen);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata(q->q_src_m, enccrd->crd_inject,
				    ivlen, (caddr_t)key.ses_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, ivlen,
				    (caddr_t)key.ses_iv);
		}

		for (i = 0; i < (keylen / 4); i++)
			key.ses_key[i] = ses->ses_key[i];
		for (i = 0; i < (ivlen / 4); i++)
			SWAP32(key.ses_iv[i]);
	}

	if (maccrd) {
		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
			flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			key.ses_hminner[i] = ses->ses_hminner[i];
			key.ses_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(key.ses_hminner[i]);
			HTOLE32(key.ses_hmouter[i]);
		}
	}

	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if (((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) ||
		    (enccrd->crd_skip < maccrd->crd_skip)) {
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		printf("mac: skip %d, len %d, inject %d\n",
 		    maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
		printf("enc: skip %d, len %d, inject %d\n",
		    enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
		printf("src: skip %d, len %d\n", sskip, stheend);
		printf("dst: skip %d, len %d\n", dskip, dtheend);
		printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
		    coffset, stheend, cpskip, cpoffset);
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}

	if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
		0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
		    q->q_src_m, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
		    q->q_src_io, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = ubsec_dmamap_aligned(q->q_src_map);

	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
	printf("src skip: %d\n", sskip);
#endif
	for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) {
		struct ubsec_pktbuf *pb;
		bus_size_t packl = q->q_src_map->dm_segs[i].ds_len;
		bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr;

		if (sskip >= packl) {
			sskip -= packl;
			continue;
		}

		packl -= sskip;
		packp += sskip;
		sskip = 0;

		if (packl > 0xfffc) {
			err = EIO;
			goto errout;
		}

		if (j == 0)
			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
		else
			pb = &dmap->d_dma->d_sbuf[j - 1];

		pb->pb_addr = htole32(packp);

		if (stheend) {
			if (packl > stheend) {
				pb->pb_len = htole32(stheend);
				stheend = 0;
			} else {
				pb->pb_len = htole32(packl);
				stheend -= packl;
			}
		} else
			pb->pb_len = htole32(packl);

		if ((i + 1) == q->q_src_map->dm_nsegs)
			pb->pb_next = 0;
		else
			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
		j++;
	}

	if (enccrd == NULL && maccrd != NULL) {
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next =
		    htole32(dmap->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
		printf("opkt: %x %x %x\n",
		    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
		    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
		    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
	} else {
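
The loop over q_src_map->dm_segs in ubsec_process() has two jobs: drop the first sskip bytes of the mapping and cap the described payload at stheend bytes. The driver-independent sketch below shows the same walk in isolation; the emit() callback is an assumption, and it is simplified in that it stops once the payload is consumed, whereas ubsec keeps describing the remaining segments.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/bus.h>		/* <machine/bus.h> on OpenBSD */

/*
 * Walk a loaded DMA map, skip 'skip' leading bytes, and describe at
 * most 'resid' bytes through emit().  Illustrative only.
 */
static int
walk_segs(bus_dmamap_t map, bus_size_t skip, bus_size_t resid,
    void (*emit)(void *, bus_addr_t, bus_size_t), void *arg)
{
	int i;

	for (i = 0; i < map->dm_nsegs && resid > 0; i++) {
		bus_addr_t addr = map->dm_segs[i].ds_addr;
		bus_size_t len = map->dm_segs[i].ds_len;

		if (skip >= len) {		/* still inside the skip area */
			skip -= len;
			continue;
		}
		addr += skip;			/* start inside this segment */
		len -= skip;
		skip = 0;

		if (len > resid)
			len = resid;
		emit(arg, addr, len);
		resid -= len;
	}
	return (resid == 0 ? 0 : EINVAL);	/* map shorter than skip+resid */
}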