Example #1: m_defrag() - defragment an mbuf chain into a single mbuf in place
/*
 * mbuf chain defragmenter. This function uses some evil tricks to defragment
 * an mbuf chain into a single buffer without changing the mbuf pointer.
 * This needs to know a lot of the mbuf internals to make this work.
 */
int
m_defrag(struct mbuf *m, int how)
{
	struct mbuf *m0;

	if (m->m_next == NULL)
		return (0);

#ifdef DIAGNOSTIC
	if (!(m->m_flags & M_PKTHDR))
		panic("m_defrag: no packet hdr or not a chain");
#endif

	if ((m0 = m_gethdr(how, m->m_type)) == NULL)
		return (ENOBUFS);
	if (m->m_pkthdr.len > MHLEN) {
		MCLGETI(m0, how, NULL, m->m_pkthdr.len);
		if (!(m0->m_flags & M_EXT)) {
			m_free(m0);
			return (ENOBUFS);
		}
	}
	m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
	m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;

	/* free chain behind and possible ext buf on the first mbuf */
	m_freem(m->m_next);
	m->m_next = NULL;
	if (m->m_flags & M_EXT)
		m_extfree(m);

	/*
	 * Bounce copy mbuf over to the original mbuf and set everything up.
	 * This needs to reset or clear all pointers that may go into the
	 * original mbuf chain.
	 */
	if (m0->m_flags & M_EXT) {
		memcpy(&m->m_ext, &m0->m_ext, sizeof(struct mbuf_ext));
		MCLINITREFERENCE(m);
		m->m_flags |= m0->m_flags & (M_EXT|M_EXTWR);
		m->m_data = m->m_ext.ext_buf;
	} else {
		m->m_data = m->m_pktdat;
		memcpy(m->m_data, m0->m_data, m0->m_len);
	}
	m->m_pkthdr.len = m->m_len = m0->m_len;

	m0->m_flags &= ~(M_EXT|M_EXTWR);	/* cluster is gone */
	m_free(m0);

	return (0);
}
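A typical caller of m_defrag() is a driver transmit path: when bus_dmamap_load_mbuf() rejects a chain with EFBIG because it has too many segments, the chain is flattened and the load retried. A minimal sketch of that pattern, assuming placeholder names "sc" for the softc and "map" for the per-packet DMA map:

	/*
	 * Hedged sketch: retry a failed DMA load after defragmenting.
	 * "sc" and "map" are placeholders, not from the examples above.
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (error == EFBIG && m_defrag(m, M_DONTWAIT) == 0)
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);	/* give up on this packet */
		return (error);
	}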
Example #2: tsec_alloc_mbuf() - allocate and DMA-load an Rx cluster mbuf
struct mbuf *
tsec_alloc_mbuf(struct tsec_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_ac.ac_if, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}
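For comparison, MCLGETI(NULL, how, ifp, len) collapses what used to be a two-step allocation (ignoring MCLGETI's extra per-interface cluster accounting via the ifp argument). A hedged sketch of the classic two-step equivalent, so the single call above is easier to read:

	/*
	 * Hedged sketch: the older two-step equivalent of
	 * MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES).
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);	/* mbuf with packet header */
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);		/* attach an MCLBYTES cluster */
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		}
	}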
Example #3: nfe_rxeof() - Rx processing that swaps in freshly allocated mbufs
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		mnew = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
			    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely to fail */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
	if_input(ifp, &ml);
}
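The allocate-before-swap strategy described in the comment inside nfe_rxeof() distills to a few lines. A hedged sketch, with "rx_slot" standing in for the driver's per-descriptor state (a hypothetical name):

	/*
	 * Hedged sketch of the reload pattern: only take the old mbuf out
	 * of the ring once its replacement is allocated and DMA-loaded.
	 * "rx_slot" is a hypothetical per-descriptor structure.
	 */
	mnew = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (mnew != NULL && bus_dmamap_load_mbuf(sc->sc_dmat, rx_slot->map,
	    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT) == 0) {
		m = rx_slot->m;		/* old mbuf goes up the stack */
		rx_slot->m = mnew;	/* fresh mbuf stays in the ring */
		ml_enqueue(&ml, m);
	} else {
		m_freem(mnew);		/* m_freem(NULL) is a no-op */
		ifp->if_ierrors++;	/* drop; old mbuf stays in the ring */
	}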
Example #4: m_pullup() - make len bytes contiguous at the front of a chain
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 && n->m_next &&
	    n->m_data + len < &n->m_dat[MLEN]) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else if ((n->m_flags & M_EXT) != 0 && len > MHLEN && n->m_next &&
	    n->m_data + len < &n->m_ext.ext_buf[n->m_ext.ext_size]) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MAXMCLBYTES)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (len > MHLEN) {
			MCLGETI(m, M_DONTWAIT, NULL, len);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				goto bad;
			}
		}
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}

	do {
		count = min(len, n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void)m_free(m);
		goto bad;
	}
	m->m_next = n;

	return (m);
bad:
	m_freem(n);
	return (NULL);
}
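The usual caller pattern, per the comment above: guarantee a contiguous header before dereferencing it with mtod(). A minimal sketch for an IPv4 header:

	/*
	 * Hedged sketch: standard m_pullup() call before touching a
	 * header.  On failure the chain is already freed, so only the
	 * local pointer needs handling.
	 */
	struct ip *ip;

	if (m->m_len < sizeof(struct ip)) {
		m = m_pullup(m, sizeof(struct ip));
		if (m == NULL)
			return;	/* chain was freed by m_pullup() */
	}
	ip = mtod(m, struct ip *);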
Example #5: m_copyback() - copy data into a chain, extending it as needed
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary. The mbuf needs to be properly initialized
 * including the setting of m_len.
 */
int
m_copyback(struct mbuf *m0, int off, int len, const void *_cp, int wait)
{
	int mlen, totlen = 0;
	struct mbuf *m = m0, *n;
	caddr_t cp = (caddr_t)_cp;
	int error = 0;

	if (m0 == NULL)
		return (0);
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			if ((n = m_get(wait, m->m_type)) == NULL) {
				error = ENOBUFS;
				goto out;
			}

			if (off + len > MLEN) {
				MCLGETI(n, wait, NULL, off + len);
				if (!(n->m_flags & M_EXT)) {
					m_free(n);
					error = ENOBUFS;
					goto out;
				}
			}
			memset(mtod(n, caddr_t), 0, off);
			n->m_len = len + off;
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		/* extend last packet to be filled fully */
		if (m->m_next == NULL && (len > m->m_len - off))
			m->m_len += min(len - (m->m_len - off),
			    M_TRAILINGSPACE(m));
		mlen = min(m->m_len - off, len);
		memmove(mtod(m, caddr_t) + off, cp, mlen);
		cp += mlen;
		len -= mlen;
		totlen += mlen + off;
		if (len == 0)
			break;
		off = 0;

		if (m->m_next == NULL) {
			if ((n = m_get(wait, m->m_type)) == NULL) {
				error = ENOBUFS;
				goto out;
			}

			if (len > MLEN) {
				MCLGETI(n, wait, NULL, len);
				if (!(n->m_flags & M_EXT)) {
					m_free(n);
					error = ENOBUFS;
					goto out;
				}
			}
			n->m_len = len;
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;

	return (error);
}
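A typical use is writing a fixed-size structure back into a packet at a known offset, letting m_copyback() grow the chain if the packet is short. A hedged sketch, where "hlen" is a placeholder offset:

	/*
	 * Hedged sketch: rewrite a UDP header in place at offset "hlen";
	 * m_copyback() allocates any mbufs needed to reach that offset.
	 */
	struct udphdr uh;

	/* ... fill in uh ... */
	if (m_copyback(m, hlen, sizeof(uh), &uh, M_DONTWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}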
Example #6: vnet_rx_vio_dring_data() - receive ring data over a hypervisor LDC channel
void
vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	paddr_t pa;
	psize_t nbytes;
	int err;

	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
	{
		struct vnet_desc desc;
		uint64_t cookie;
		paddr_t desc_pa;
		int idx, ack_end_idx = -1;
		struct mbuf_list ml = MBUF_LIST_INITIALIZER();

		idx = dm->start_idx;
		for (;;) {
			cookie = sc->sc_peer_dring_cookie.addr;
			cookie += idx * sc->sc_peer_desc_size;
			nbytes = sc->sc_peer_desc_size;
			pmap_extract(pmap_kernel(), (vaddr_t)&desc, &desc_pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("hv_ldc_copy_in %d\n", err);
				break;
			}

			if (desc.hdr.dstate != VIO_DESC_READY)
				break;

			if (desc.nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
				ifp->if_ierrors++;
				goto skip;
			}

			m = MCLGETI(NULL, M_DONTWAIT, NULL, desc.nbytes);
			if (!m)
				break;
			m->m_len = m->m_pkthdr.len = desc.nbytes;
			nbytes = roundup(desc.nbytes + VNET_ETHER_ALIGN, 8);

			pmap_extract(pmap_kernel(), (vaddr_t)m->m_data, &pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
			    desc.cookie[0].addr, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				m_freem(m);
				goto skip;
			}
			m->m_data += VNET_ETHER_ALIGN;

			ml_enqueue(&ml, m);

		skip:
			desc.hdr.dstate = VIO_DESC_DONE;
			nbytes = sc->sc_peer_desc_size;
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK)
				printf("hv_ldc_copy_out %d\n", err);

			ack_end_idx = idx;
			if (++idx == sc->sc_peer_dring_nentries)
				idx = 0;
		}

		if_input(ifp, &ml);

		if (ack_end_idx == -1) {
			dm->tag.stype = VIO_SUBTYPE_NACK;
		} else {
			dm->tag.stype = VIO_SUBTYPE_ACK;
			dm->end_idx = ack_end_idx;
		}
		dm->tag.sid = sc->sc_local_sid;
		dm->proc_state = VIO_DP_STOPPED;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;
	}

	case VIO_SUBTYPE_ACK:
	{
		struct ldc_map *map = sc->sc_lm;
		u_int cons, count;

		sc->sc_peer_state = dm->proc_state;

		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
			atomic_dec_int(&map->lm_count);

			pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
			ifp->if_opackets++;

			sc->sc_vd->vd_desc[cons].hdr.dstate = VIO_DESC_FREE;
			sc->sc_tx_cons++;
			cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		}

		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count > 0 && sc->sc_peer_state != VIO_DP_ACTIVE)
			vnet_send_dring_data(sc, cons);

		KERNEL_LOCK();
		if (count < (sc->sc_vd->vd_nentries - 1))
			ifp->if_flags &= ~IFF_OACTIVE;
		if (count == 0)
			ifp->if_timer = 0;

		vnet_start(ifp);
		KERNEL_UNLOCK();
		break;
	}

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		sc->sc_peer_state = VIO_DP_STOPPED;
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}
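The mbuf_list idiom in the VIO_SUBTYPE_INFO case is worth isolating: packets are collected on a stack-local list and handed to the stack in a single if_input() call rather than one call per packet. A hedged distillation, where rx_next_packet() is a hypothetical helper standing for whatever pulls the next completed frame off the ring:

	/*
	 * Hedged distillation of the batched-input pattern above.
	 * rx_next_packet() is hypothetical, not a real kernel function.
	 */
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;

	while ((m = rx_next_packet(sc)) != NULL)
		ml_enqueue(&ml, m);
	if_input(ifp, &ml);	/* one hand-off for the whole batch */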
Example #7: rtwn_alloc_rx_list() - set up an Rx descriptor ring and its buffers
int
rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	size_t size;
	int i, error = 0;

	/* Allocate Rx descriptors. */
	size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
		&rx_ring->map);
	if (error != 0) {
		printf("%s: could not create rx desc DMA map\n",
		    sc->sc_dev.dv_xname);
		rx_ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
	    &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate rx desc\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
	    size, (caddr_t *)&rx_ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
		rx_ring->desc = NULL;
		printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
	    1, size, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load rx desc\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
	    BUS_DMASYNC_PREWRITE);

	/* Allocate Rx buffers. */
	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rx_data->map);
		if (error != 0) {
			printf("%s: could not create rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		rx_data->m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
		if (rx_data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
		    mtod(rx_data->m, void *), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
	}
fail:	if (error != 0)
		rtwn_free_rx_list(sc);
	return (error);
}
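The fail path relies on rtwn_free_rx_list() tolerating a half-built ring. A hedged sketch of what such a teardown typically does (the real function may differ in detail): undo each setup step in reverse, guarding against slots that were never initialized:

	/*
	 * Hedged sketch of a teardown that tolerates partial
	 * initialization; the real rtwn_free_rx_list() may differ.
	 */
	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];
		if (rx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
		}
		if (rx_data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
			rx_data->map = NULL;
		}
	}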
Example #8: rtwn Rx fragment - replace a loaded Rx mbuf, recovering the old one on failure
	if (infosz > sizeof(struct r92c_rx_phystat))
		infosz = sizeof(struct r92c_rx_phystat);
	shift = MS(rxdw0, R92C_RXDW0_SHIFT);

	/* Get RSSI from PHY status descriptor if present. */
	if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) {
		phy = mtod(rx_data->m, struct r92c_rx_phystat *);
		rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy);
		/* Update our average RSSI. */
		rtwn_update_avgrssi(&sc->sc_sc, rate, rssi);
	}

	DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n",
	    pktlen, rate, infosz, shift, rssi));

	m1 = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (m1 == NULL) {
		ifp->if_ierrors++;
		return;
	}
	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
	error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
	    mtod(m1, void *), MCLBYTES, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ);
	if (error != 0) {
		m_freem(m1);

		if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
		    rx_data->m, BUS_DMA_NOWAIT))
			panic("%s: could not load old RX mbuf",
			    sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		return;
	}
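On the success path that follows this fragment, the natural continuation is the same swap as in Example #3. A hedged sketch of that step; the exact upstream code, in particular how much of the descriptor and PHY-status prefix is stripped, may differ:

	/*
	 * Hedged sketch of the success path: keep the fresh mbuf in the
	 * ring slot and finalize the old one for the stack.
	 */
	m = rx_data->m;
	rx_data->m = m1;
	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;
	m_adj(m, infosz + shift);	/* drop PHY status and padding */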