Example #1
static int
ffec_setup_rxbuf(struct ffec_softc *sc, int idx, struct mbuf * m)
{
	int error, nsegs;
	struct bus_dma_segment seg;

	/*
	 * We need to leave at least ETHER_ALIGN bytes free at the beginning of
	 * the buffer to allow the data to be re-aligned after receiving it (by
	 * copying it backwards ETHER_ALIGN bytes in the same buffer).  We also
	 * have to ensure that the beginning of the buffer is aligned to the
	 * hardware's requirements.
	 */
	m_adj(m, roundup(ETHER_ALIGN, FEC_RXBUF_ALIGN));

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0) {
		return (error);
	}

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map, 
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	ffec_setup_rxdesc(sc, idx, seg.ds_addr);
	
	return (0);
}
Example #2
int
sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mp, bus_dma_segment_t *segs, int *nsegs, int maxsegs)
{
	bus_dma_segment_t *psegs;
	struct mbuf *m;
	int seg_count;
	int defragged;
	int err;

	m = *mp;
	defragged = err = seg_count = 0;

	KASSERT(m->m_pkthdr.len, ("packet has zero header length"));

retry:
	psegs = segs;
	seg_count = 0;
	if (m->m_next == NULL) {
		sfxge_map_mbuf_fast(tag, map, m, segs);
		*nsegs = 1;
		return (0);
	}
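	/*
	 * Multi-mbuf chain: on i386/amd64 map each non-empty mbuf with
	 * sfxge_map_mbuf_fast(); on other platforms fall back to
	 * bus_dmamap_load_mbuf_sg().
	 */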
#if defined(__i386__) || defined(__amd64__)
	while (m && seg_count < maxsegs) {
		/*
		 * firmware doesn't like empty segments
		 */
		if (m->m_len != 0) {
			seg_count++;
			sfxge_map_mbuf_fast(tag, map, m, psegs);
			psegs++;
		}
		m = m->m_next;
	}
#else
	err = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, &seg_count, 0);
#endif
	if (seg_count == 0) {
		err = EFBIG;
		goto err_out;
	} else if (err == EFBIG || seg_count >= maxsegs) {
		if (!defragged) {
			m = m_defrag(*mp, M_NOWAIT);
			if (m == NULL) {
				err = ENOBUFS;
				goto err_out;
			}
			*mp = m;
			defragged = 1;
			goto retry;
		}
		err = EFBIG;
		goto err_out;
	}
	*nsegs = seg_count;

err_out:
	return (err);
}
Example #3
static int
ffec_setup_txbuf(struct ffec_softc *sc, int idx, struct mbuf **mp)
{
	struct mbuf * m;
	int error, nsegs;
	struct bus_dma_segment seg;

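	/*
	 * Only a single DMA segment is used below, so defragment the chain
	 * into one contiguous mbuf before mapping it.
	 */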
	if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
		return (ENOMEM);
	*mp = m;

	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0) {
		return (ENOMEM);
	}
	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map, 
	    BUS_DMASYNC_PREWRITE);

	sc->txbuf_map[idx].mbuf = m;
	ffec_setup_txdesc(sc, idx, seg.ds_addr, seg.ds_len);

	return (0);
}
Example #4
static int
awg_setup_txbuf(struct awg_softc *sc, int index, struct mbuf **mp)
{
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, i, flags;
	u_int csum_flags;
	struct mbuf *m;

	m = *mp;
	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
	    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
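	/*
	 * EFBIG means the chain needs more segments than the map allows;
	 * collapse it to at most TX_MAX_SEGS mbufs and retry the load once.
	 */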
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL)
			return (0);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
		    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return (0);

	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map,
	    BUS_DMASYNC_PREWRITE);

	flags = TX_FIR_DESC;
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	for (cur = index, i = 0; i < nsegs; i++) {
		sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= TX_LAST_DESC;
		awg_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);
		flags &= ~TX_FIR_DESC;
		cur = TX_NEXT(cur);
	}

	return (nsegs);
}
Example #5
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
kr_newbuf(struct kr_softc *sc, int idx)
{
	struct kr_desc		*desc;
	struct kr_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_rx_tag,
	    sc->kr_cdata.kr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->kr_cdata.kr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap);
	}
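	/*
	 * Swap the freshly loaded spare map into this slot; the old map
	 * becomes the new spare.
	 */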
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->kr_cdata.kr_rx_sparemap;
	sc->kr_cdata.kr_rx_sparemap = map;
	bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->kr_ca = segs[0].ds_addr;
	desc->kr_ctl |= KR_DMASIZE(segs[0].ds_len);
	rxd->saved_ca = desc->kr_ca;
	rxd->saved_ctl = desc->kr_ctl;

	return (0);
}
Example #6
static int
awg_setup_rxbuf(struct awg_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t seg;
	int error, nsegs;

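	/* Offset the buffer by ETHER_ALIGN so the received IP header is 32-bit aligned. */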
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
	    sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    BUS_DMASYNC_PREREAD);

	sc->rx.buf_map[index].mbuf = m;
	awg_setup_rxdesc(sc, index, seg.ds_addr);

	return (0);
}
Example #7
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
kr_encap(struct kr_softc *sc, struct mbuf **m_head)
{
	struct kr_txdesc	*txd;
	struct kr_desc		*desc, *prev_desc;
	bus_dma_segment_t	txsegs[KR_MAXFRAGS];
	uint32_t		link_addr;
	int			error, i, nsegs, prod, si, prev_prod;

	KR_LOCK_ASSERT(sc);

	prod = sc->kr_cdata.kr_tx_prod;
	txd = &sc->kr_cdata.kr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		panic("EFBIG");
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->kr_cdata.kr_tx_cnt + nsegs >= (KR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

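	/* Remember the index of the first descriptor of this packet. */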
	si = prod;

	/* 
	 * Make a list of descriptors for this packet. DMA controller will
	 * walk through it while kr_link is not zero. The last one should
	 * have the COF flag set, to pick up the next chain from NDPTR.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->kr_rdata.kr_tx_ring[prod];
		desc->kr_ctl = KR_DMASIZE(txsegs[i].ds_len) | KR_CTL_IOF;
		if (i == 0)
			desc->kr_devcs = KR_DMATX_DEVCS_FD;
		desc->kr_ca = txsegs[i].ds_addr;
		desc->kr_link = 0;
		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->kr_link = KR_TX_RING_ADDR(sc, prod);

		sc->kr_cdata.kr_tx_cnt++;
		prev_desc = desc;
		KR_INC(prod, KR_TX_RING_CNT);
	}

	/* 
	 * Set COF for last descriptor and mark last fragment with LD flag
	 */
	if (desc) {
		desc->kr_ctl |=  KR_CTL_COF;
		desc->kr_devcs |= KR_DMATX_DEVCS_LD;
	}

	/* Update producer index. */
	sc->kr_cdata.kr_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	/* Check if new list is queued in NDPTR */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_NDPTR) == 0) {
		/* NDPTR is not busy - start new list */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 
		    KR_TX_RING_ADDR(sc, si));
	} else {
		link_addr = KR_TX_RING_ADDR(sc, si);
		/* Get previous descriptor */
		si = (si + KR_TX_RING_CNT - 1) % KR_TX_RING_CNT;
		desc = &sc->kr_rdata.kr_tx_ring[si];
		desc->kr_link = link_addr;
	}

	return (0);
}
Example #8
static int
ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	/* XXX TODO: ATH_RX_LOCK_ASSERT(sc); */

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be a
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo). This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
Example #9
static int
ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
{
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	struct ixl_tx_buf	*buf;
	struct i40e_tx_desc	*txd = NULL;
	struct mbuf		*m_head, *m;
	int             	i, j, error, nsegs, maxsegs;
	int			first, last = 0;
	u16			vtag = 0;
	u32			cmd, off;
	bus_dmamap_t		map;
	bus_dma_tag_t		tag;
	bus_dma_segment_t	segs[IXL_MAX_TSO_SEGS];


	cmd = off = 0;
	m_head = *m_headp;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail;
	buf = &txr->buffers[first];
	map = buf->map;
	tag = txr->tx_tag;
	maxsegs = IXL_MAX_TX_SEGS;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Use larger mapping for TSO */
		tag = txr->tso_tag;
		maxsegs = IXL_MAX_TSO_SEGS;
		if (ixl_tso_detect_sparse(m_head)) {
			m = m_defrag(m_head, M_NOWAIT);
			if (m == NULL) {
				que->mbuf_defrag_failed++;
				m_freem(m_head);
				*m_headp = NULL;
				return (ENOBUFS);
			}
			*m_headp = m;
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(tag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
		if (m == NULL) {
			que->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(tag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			que->tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			que->tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		que->tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		que->tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->avail - 2) {
		txr->no_desc++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/* Set up the TSO/CSUM offload */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
		error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
		if (error)
			goto xmit_fail;
	}

	cmd |= I40E_TX_DESC_CMD_ICRC;
	/* Grab the VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		vtag = htole16(m_head->m_pkthdr.ether_vtag);
	}

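	/* Build one TX data descriptor per DMA segment. */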
	i = txr->next_avail;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		buf = &txr->buffers[i];
		buf->tag = tag; /* Keep track of the type tag */
		txd = &txr->base[i];
		seglen = segs[j].ds_len;

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(I40E_TX_DESC_DTYPE_DATA
		    | ((u64)cmd  << I40E_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)vtag  << I40E_TXD_QW1_L2TAG1_SHIFT));

		last = i; /* descriptor that will get completion IRQ */

		if (++i == que->num_desc)
			i = 0;

		buf->m_head = NULL;
		buf->eop_index = -1;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
	txr->avail -= nsegs;
	txr->next_avail = i;

	buf->m_head = m_head;
	/* Swap the dma map between the first and last descriptor */
	txr->buffers[first].map = buf->map;
	buf->map = map;
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	buf = &txr->buffers[first];
	buf->eop_index = last;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt); this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	wr32(hw, txr->tail, i);

	ixl_flush(hw);
	/* Mark outstanding work */
	if (que->busy == 0)
		que->busy = 1;
	return (0);

xmit_fail:
	bus_dmamap_unload(tag, buf->map);
	return (error);
}
Example #10
/*
 * Allocate and setup an initial beacon frame.
 */
int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	bf = avp->av_bcbuf;
	DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n",
	    __func__, bf->bf_m, bf->bf_node);
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	if (bf->bf_node != NULL) {
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}

	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ni, &avp->av_boff);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
		    __func__, error);
		m_freem(m);
		return error;
	}

	/*
	 * Calculate a TSF adjustment factor required for staggered
	 * beacons.  Note that we assume the format of the beacon
	 * frame leaves the tstamp field immediately following the
	 * header.
	 */
	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
		uint64_t tsfadjust;
		struct ieee80211_frame *wh;

		/*
		 * The beacon interval is in TU's; the TSF is in usecs.
		 * We figure out how many TU's to add to align the timestamp
		 * then convert to TSF units and handle byte swapping before
		 * inserting it in the frame.  The hardware will then add this
		 * each time a beacon frame is sent.  Note that we align vap's
		 * 1..N and leave vap 0 untouched.  This means vap 0 has a
		 * timestamp in one beacon interval while the others get a
		 * timestamp aligned to the next interval.
		 */
		tsfadjust = ni->ni_intval *
		    (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
		tsfadjust = htole64(tsfadjust << 10);	/* TU -> TSF */

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
		    __func__, sc->sc_stagbeacons ? "stagger" : "burst",
		    avp->av_bslot, ni->ni_intval,
		    (long long unsigned) le64toh(tsfadjust));

		wh = mtod(m, struct ieee80211_frame *);
		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
	}
Example #11
/*
 * Send a command to the firmware.  We try to implement the Linux
 * driver interface for the routine; mostly taken from if_iwn (iwn_cmd()).
 *
 * For now, we always copy the first part and map the second one (if it exists).
 */
int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata = NULL;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int error = 0, i, paylen, off;
	int code;
	int async, wantresp;
	int group_id;
	int nsegs;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
	data = NULL;

	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async, ("invalid async parameter"));
		while (sc->sc_wantresp != -1)
			msleep(&sc->sc_wantresp, &sc->sc_mtx, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
		IWM_DPRINTF(sc, IWM_DEBUG_CMD,
		    "wantresp is %x\n", sc->sc_wantresp);
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		error = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		size_t totlen;
		IWM_DPRINTF(sc, IWM_DEBUG_CMD,
		    "large command paylen=%u len0=%u\n",
			paylen, hcmd->len[0]);
		/* Command is too large */
		totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			device_printf(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n",
			    totlen);
			error = EINVAL;
			goto out;
		}
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
		    txdata->map, m, &seg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf, error %d\n", __func__, error);
			m_freem(m);
			goto out;
		}
		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
		cmd = mtod(m, struct iwm_device_cmd *);
		paddr = seg.ds_addr;
	} else {
Example #12
int
busdma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
	struct mbuf **m, bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *n = *m;
	int seg_count, defragged = 0, err = 0;
	bus_dma_segment_t *psegs;
	
	KASSERT(n->m_pkthdr.len, ("packet has zero header len"));
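	/* Chains no longer than PIO_LEN are not DMA-mapped here. */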
	if (n->m_pkthdr.len <= PIO_LEN)
		return (0);
retry:
	psegs = segs;
	seg_count = 0;
	if (n->m_next == NULL) {
		busdma_map_mbuf_fast(tag, map, n, segs);
		*nsegs = 1;
		return (0);
	}
#if defined(__i386__) || defined(__amd64__)
	while (n && seg_count < TX_MAX_SEGS) {
		/*
		 * firmware doesn't like empty segments
		 */
		if (__predict_true(n->m_len != 0)) {
			seg_count++;
			busdma_map_mbuf_fast(tag, map, n, psegs);
			psegs++;
		}
		n = n->m_next;
	}
#else
	err = bus_dmamap_load_mbuf_sg(tag, map, *m, segs, &seg_count, 0);
#endif	
	if (seg_count == 0) {
		if (cxgb_debug)
			printf("empty segment chain\n");
		err = EFBIG;
		goto err_out;
	} else if (err == EFBIG || seg_count >= TX_MAX_SEGS) {
		if (cxgb_debug)
			printf("mbuf chain too long: %d max allowed %d\n",
			    seg_count, TX_MAX_SEGS);
		if (!defragged) {
			n = m_defrag(*m, M_NOWAIT);
			if (n == NULL) {
				err = ENOBUFS;
				goto err_out;
			}
			*m = n;
			defragged = 1;
			goto retry;
		}
		err = EFBIG;
		goto err_out;
	}

	*nsegs = seg_count;
err_out:	
	return (err);
}
Example #13
static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
     __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct mbuf *mb;
	int nsegs;
	int err;

	/* try to allocate a new spare mbuf */
	if (unlikely(ring->spare.mbuf == NULL)) {
		mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
		if (unlikely(mb == NULL))
			return (-ENOMEM);
		/* setup correct length */
		mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

		/* make sure IP header gets aligned */
		m_adj(mb, MLX4_NET_IP_ALIGN);

		/* load spare mbuf into BUSDMA */
		err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		if (unlikely(err != 0)) {
			m_freem(mb);
			return (err);
		}

		/* store spare info */
		ring->spare.mbuf = mb;
		ring->spare.paddr_be = cpu_to_be64(segs[0].ds_addr);

		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* synchronize and unload the current mbuf, if any */
	if (likely(mb_list->mbuf != NULL)) {
		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
	}

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
	if (unlikely(mb == NULL))
		goto use_spare;

	/* setup correct length */
	mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

	/* make sure IP header gets aligned */
	m_adj(mb, MLX4_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely(err != 0)) {
		m_freem(mb);
		goto use_spare;
	}

	*pdma = cpu_to_be64(segs[0].ds_addr);
	mb_list->mbuf = mb;

	bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
	return (0);

use_spare:
	/* swap DMA maps */
	map = mb_list->dma_map;
	mb_list->dma_map = ring->spare.dma_map;
	ring->spare.dma_map = map;

	/* swap MBUFs */
	mb_list->mbuf = ring->spare.mbuf;
	ring->spare.mbuf = NULL;

	/* store physical address */
	*pdma = ring->spare.paddr_be;
	return (0);
}