Example #1
void *m_ext_get(struct vmm_mbuf *m, u32 size, enum vmm_mbuf_alloc_types how)
{
	void *buf;
	u32 slab;
	struct mempool *mp = NULL;

	if (VMM_MBUF_ALLOC_DMA == how) {
		buf = vmm_dma_malloc(size);
		if (!buf) {
			return NULL;
		}
		m->m_flags |= M_EXT_DMA;
		MEXTADD(m, buf, size, ext_dma_free, NULL);
	} else {
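		/* Pick the smallest external-buffer slab that fits the request. */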
		for (slab = 0; slab < EPOOL_SLAB_COUNT; slab++) {
			if (size <= epool_slab_buf_size(slab)) {
				mp = mbpctrl.epool_slabs[slab];
				break;
			}
		}

		if (mp && (buf = mempool_malloc(mp))) {
			m->m_flags |= M_EXT_POOL;
			MEXTADD(m, buf, size, ext_pool_free, mp);
		} else if ((buf = vmm_malloc(size))) {
			m->m_flags |= M_EXT_HEAP;
			MEXTADD(m, buf, size, ext_heap_free, NULL);
		} else {
			return NULL;
		}
	}

	return m->m_extbuf;
}
Example #2
struct mbuf *
m_clget(struct mbuf *m, int how, u_int pktlen)
{
	struct mbuf *m0 = NULL;
	struct pool *pp;
	caddr_t buf;

	pp = m_clpool(pktlen);
#ifdef DIAGNOSTIC
	if (pp == NULL)
		panic("m_clget: request for %u byte cluster", pktlen);
#endif

	if (m == NULL) {
		m0 = m_gethdr(how, MT_DATA);
		if (m0 == NULL)
			return (NULL);

		m = m0;
	}
	buf = pool_get(pp, how == M_WAIT ? PR_WAITOK : PR_NOWAIT);
	if (buf == NULL) {
		if (m0)
			m_freem(m0);
		return (NULL);
	}

	MEXTADD(m, buf, pp->pr_size, M_EXTWR, m_extfree_pool, pp);
	return (m);
}
Example #3
static int
mb_ctor_clust_extref(void *mem, int size, void *arg, int how)
{
	struct mbuf *m = (struct mbuf *)arg;
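	/*
	 * Attach the cluster under construction as EXT_JUMBOP external
	 * storage; no free routine or argument is supplied here.
	 */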
	MEXTADD(m, mem, size, NULL, NULL, NULL, 0, EXT_JUMBOP);

	return (0);
}
Example #4
int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = PHYS_TO_XKPHYS(word3 & PIP_WQE_WORD3_ADDR, CCA_CACHED);
		start_buffer = addr & ~(2048 - 1);
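		/* The packet sits in a 2048-byte-aligned hardware buffer,
		 * attached whole below; m_data later points inside it. */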

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
Example #5
static err_t lwip_netstack_output(struct netif *netif, struct pbuf *p)
{
	struct vmm_mbuf *mbuf, *mbuf_head, *mbuf_cur;
	struct pbuf *q;
	struct lwip_netstack *lns = netif->state;

	if (!p || !p->payload || !p->len) {
		return ERR_OK;
	}

	if (p->tot_len > MAX_FRAME_LEN) {
		/* Frame too long, drop it */
		return ERR_MEM;
	}

	/* Increase reference to the pbuf as we reuse the same buffers */
	pbuf_ref(p);

	/* Create the first mbuf in the chain */
	MGETHDR(mbuf_head, 0, 0);
	if (!mbuf_head) {
		/* Drop the reference taken above before failing */
		pbuf_free(p);
		return ERR_MEM;
	}
	MEXTADD(mbuf_head, p->payload, p->len, lwip_netstack_mbuf_free, p);
	mbuf_cur = mbuf_head;

	/* Create next mbufs in chain from the pbuf chain */
	q = p->next;
	while (q != NULL) {
		MGET(mbuf, 0, M_EXT_DONTFREE);
		MEXTADD(mbuf, q->payload, q->len, NULL, NULL);
		mbuf_cur->m_next = mbuf;
		mbuf_cur = mbuf;
		q = q->next;
	}

	/* Setup mbuf len */
	mbuf_head->m_len = mbuf_head->m_pktlen = p->tot_len;

	/* Send mbuf to the netswitch */
	vmm_port2switch_xfer_mbuf(lns->port, mbuf_head);

	/* Return success */
	return ERR_OK;
}
Example #6
void *m_ext_get(struct vmm_mbuf *m, u32 size, int how)
{
	void *buf;
	u32 slab;
	struct mempool *mp;

	mp = NULL;
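	/* Find the smallest slab pool able to hold the buffer. */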
	for (slab = 0; slab < EPOOL_SLAB_COUNT; slab++) {
		if (size <= epool_slab_buf_size(slab)) {
			mp = mbpctrl.epool_slabs[slab];
			break;
		}
	}

	if (mp && (buf = mempool_malloc(mp))) {
		MEXTADD(m, buf, size, ext_pool_free, mp);
	} else if ((buf = vmm_malloc(size))) {
		MEXTADD(m, buf, size, NULL, NULL);
	} else {
		return NULL;
	}

	return m->m_extbuf;
}
Example #7
/*
 * Our transmit interrupt. This is triggered when a new block is to be
 * sent.  We send MTU-sized chunks of the block as mbufs with external
 * storage to sco_send_pcb().
 */
static void
btsco_intr(void *arg)
{
	struct btsco_softc *sc = arg;
	struct mbuf *m;
	uint8_t *block;
	int mlen, size;

	DPRINTFN(10, "%s block %p size %d\n",
	    sc->sc_name, sc->sc_tx_block, sc->sc_tx_size);

	if (sc->sc_sco == NULL)
		return;		/* connection is lost */

	block = sc->sc_tx_block;
	size = sc->sc_tx_size;
	sc->sc_tx_block = NULL;
	sc->sc_tx_size = 0;

	mutex_enter(bt_lock);
	while (size > 0) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			break;

		mlen = MIN(sc->sc_mtu, size);

		/* I think M_DEVBUF is true but not relevant */
		MEXTADD(m, block, mlen, M_DEVBUF, btsco_extfree, sc);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			break;
		}
		sc->sc_tx_refcnt++;

		m->m_pkthdr.len = m->m_len = mlen;
		sc->sc_tx_pending++;

		if (sco_send_pcb(sc->sc_sco, m) > 0) {
			sc->sc_tx_pending--;
			break;
		}

		block += mlen;
		size -= mlen;
	}
	mutex_exit(bt_lock);
}
Example #8
/*
 * m_ext_dma_ensure: Ensure that the data buffer is DMA-safe, reallocating
 * and copying the data if necessary.
 */
void m_ext_dma_ensure(struct vmm_mbuf *m)
{
	char *buf = NULL;

	if (vmm_is_dma(m->m_extbuf)) {
		return;
	}

	buf = vmm_dma_malloc(m->m_len);
	if (!buf) {
		return;
	}
	memcpy(buf, m->m_extbuf, m->m_len);
	if (m->m_extfree) {
		m->m_extfree(m, m->m_extbuf, m->m_extlen, m->m_extarg);
	} else {
		vmm_free(m->m_extbuf);
	}
	MEXTADD(m, buf, m->m_len, ext_dma_free, 0);
}
Example #9
int netdev_switch2port_xfer(struct vmm_netport *port,
		struct vmm_mbuf *mbuf)
{
	int rc = VMM_OK;
	struct net_device *dev = (struct net_device *) port->priv;
	char *buf;
	int len;

	if (mbuf->m_next) {
		/* Cannot avoid a copy in case of fragmented mbuf data */
		len = min(dev->mtu, (unsigned int)mbuf->m_pktlen);
		buf = vmm_malloc(len);
		if (!buf) {
			m_freem(mbuf);
			return VMM_ENOMEM;
		}
		m_copydata(mbuf, 0, len, buf);
		m_freem(mbuf);
		MGETHDR(mbuf, 0, 0);
		MEXTADD(mbuf, buf, len, 0, 0);
	}

	dev->netdev_ops->ndo_start_xmit(mbuf, dev);

	return rc;
}
Example #10
/**
 *  The uIP daemon calls this directly to send out the frame
 *  present in uip_buf.
 */
void uip_netport_send(void)
{
	struct vmm_mbuf *mbuf;
	struct uip_port_state *s = &uip_port_state;
	struct vmm_netport *port = s->port;
	if (!s->link_down && (uip_len > 0)) {
		/* Create a mbuf out of the uip_buf and uip_len */
		MGETHDR(mbuf, 0, 0);
		MEXTADD(mbuf, uip_buf, UIP_BUFSIZE + 2, NULL, NULL);
		mbuf->m_len = mbuf->m_pktlen = uip_len;

		if (memcmp(ether_dstmac(uip_buf), uip_ethaddr.addr, 6)) {
			/* send this mbuf to the netswitch if it is
			 * not addressed to us */
			vmm_port2switch_xfer(port, mbuf);
		} else {
			uip_netport_loopback_send(mbuf);
		}
		/* Allocate a new replacement uip_buf */
		uip_buf = vmm_malloc(UIP_BUFSIZE + 2);
	}
	/* Do we need the following? Perhaps not. */
	/* uip_len = 0; */
}
Example #11
/*
 * Construct and reliably send a netdump packet.  May fail from a resource
 * shortage or extreme number of unacknowledged retransmissions.  Wait for
 * an acknowledgement before returning.  Splits packets into chunks small
 * enough to be sent without fragmentation (looks up the interface MTU)
 *
 * Parameters:
 *	type	netdump packet type (HERALD, FINISHED, or VMCORE)
 *	offset	vmcore data offset (bytes)
 *	data	vmcore data
 *	datalen	vmcore data size (bytes)
 *
 * Returns:
 *	int see errno.h, 0 for success
 */
static int
netdump_send(uint32_t type, off_t offset, unsigned char *data, uint32_t datalen)
{
	struct netdump_msg_hdr *nd_msg_hdr;
	struct mbuf *m, *m2;
	uint64_t want_acks;
	uint32_t i, pktlen, sent_so_far;
	int retries, polls, error;

	want_acks = 0;
	rcvd_acks = 0;
	retries = 0;

	MPASS(nd_ifp != NULL);

retransmit:
	/* Chunks can be too big to fit in packets. */
	for (i = sent_so_far = 0; sent_so_far < datalen ||
	    (i == 0 && datalen == 0); i++) {
		pktlen = datalen - sent_so_far;

		/* First bound: the packet structure. */
		pktlen = min(pktlen, NETDUMP_DATASIZE);

		/* Second bound: the interface MTU (assume no IP options). */
		pktlen = min(pktlen, nd_ifp->if_mtu - sizeof(struct udpiphdr) -
		    sizeof(struct netdump_msg_hdr));

		/*
		 * Check if it is retransmitting and this has been ACKed
		 * already.
		 */
		if ((rcvd_acks & (1 << i)) != 0) {
			sent_so_far += pktlen;
			continue;
		}

		/*
		 * Get and fill a header mbuf, then chain data as an extended
		 * mbuf.
		 */
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL) {
			printf("netdump_send: Out of mbufs\n");
			return (ENOBUFS);
		}
		m->m_len = sizeof(struct netdump_msg_hdr);
		m->m_pkthdr.len = sizeof(struct netdump_msg_hdr);
		MH_ALIGN(m, sizeof(struct netdump_msg_hdr));
		nd_msg_hdr = mtod(m, struct netdump_msg_hdr *);
		nd_msg_hdr->mh_seqno = htonl(nd_seqno + i);
		nd_msg_hdr->mh_type = htonl(type);
		nd_msg_hdr->mh_offset = htobe64(offset + sent_so_far);
		nd_msg_hdr->mh_len = htonl(pktlen);
		nd_msg_hdr->mh__pad = 0;

		if (pktlen != 0) {
			m2 = m_get(M_NOWAIT, MT_DATA);
			if (m2 == NULL) {
				m_freem(m);
				printf("netdump_send: Out of mbufs\n");
				return (ENOBUFS);
			}
			MEXTADD(m2, data + sent_so_far, pktlen,
			    netdump_mbuf_free, NULL, NULL, 0, EXT_DISPOSABLE);
			m2->m_len = pktlen;

			m_cat(m, m2);
			m->m_pkthdr.len += pktlen;
		}
		error = netdump_udp_output(m);
		if (error != 0)
			return (error);

		/* Note that we're waiting for this packet in the bitfield. */
		want_acks |= (1 << i);
		sent_so_far += pktlen;
	}
	if (i >= NETDUMP_MAX_IN_FLIGHT)
		printf("Warning: Sent more than %d packets (%d). "
		    "Acknowledgements will fail unless the size of "
		    "rcvd_acks/want_acks is increased.\n",
		    NETDUMP_MAX_IN_FLIGHT, i);

	/*
	 * Wait for acks.  A *real* window would speed things up considerably.
	 */
	polls = 0;
	while (rcvd_acks != want_acks) {
		if (polls++ > nd_polls) {
			if (retries++ > nd_retries)
				return (ETIMEDOUT);
			printf(". ");
			goto retransmit;
		}
		netdump_network_poll();
		DELAY(500);
	}
	nd_seqno += i;
	return (0);
}
Example #12
int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	caddr_t data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		/*
		 * If the packet is IP, the hardware has padded it so that the
		 * IP source address starts on the next 64-bit word boundary.
		 */
		data = (caddr_t)&work[4] + ETHER_ALIGN;
		if (!ISSET(word2, PIP_WQE_WORD2_IP_NI) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_V6))
			data += 4;
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = PHYS_TO_XKPHYS(word3 & PIP_WQE_WORD3_ADDR, CCA_CACHED);
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
Example #13
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

		/* (descriptor length/flags re-arm elided in this excerpt) */
skip:		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}
Example #14
int
socow_setup(struct mbuf *m0, struct uio *uio)
{
	struct sf_buf *sf;
	vm_page_t pp;
	struct iovec *iov;
	struct vmspace *vmspace;
	struct vm_map *map;
	vm_offset_t offset, uva;

	socow_stats.attempted++;
	vmspace = curproc->p_vmspace;
	map = &vmspace->vm_map;
	uva = (vm_offset_t) uio->uio_iov->iov_base;
	offset = uva & PAGE_MASK;

	/*
	 * Verify that access to the given address is allowed from user-space.
	 */
	if (vm_fault_quick((caddr_t)uva, VM_PROT_READ) < 0)
		return (0);

	/*
	 * Verify the page is mapped and not already wired for I/O.
	 */
	pp = pmap_extract_and_hold(map->pmap, uva, VM_PROT_READ);
	if (pp == NULL) {
		socow_stats.fail_not_mapped++;
		return(0);
	}

	/* 
	 * set up COW
	 */
	vm_page_lock(pp);
	if (vm_page_cowsetup(pp) != 0) {
		vm_page_unhold(pp);
		vm_page_unlock(pp);
		return (0);
	}

	/*
	 * wire the page for I/O
	 */
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock(pp);
	/*
	 * Allocate an sf buf
	 */
	sf = sf_buf_alloc(pp, SFB_CATCH);
	if (sf == NULL) {
		vm_page_lock(pp);
		vm_page_cowclear(pp);
		vm_page_unwire(pp, 0);
		/*
		 * Check for the object going away on us. This can
		 * happen since we don't hold a reference to it.
		 * If so, we're responsible for freeing the page.
		 */
		if (pp->wire_count == 0 && pp->object == NULL)
			vm_page_free(pp);
		vm_page_unlock(pp);
		socow_stats.fail_sf_buf++;
		return(0);
	}
	/* 
	 * attach to mbuf
	 */
	MEXTADD(m0, sf_buf_kva(sf), PAGE_SIZE, socow_iodone,
	    (void*)sf_buf_kva(sf), sf, M_RDONLY, EXT_SFBUF);
	m0->m_len = PAGE_SIZE - offset;
	m0->m_data = (caddr_t)sf_buf_kva(sf) + offset;
	socow_stats.success++;

	iov = uio->uio_iov;
	iov->iov_base = (char *)iov->iov_base + m0->m_len;
	iov->iov_len -= m0->m_len;
	uio->uio_resid -= m0->m_len;
	uio->uio_offset += m0->m_len;
	if (iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return(m0->m_len);
}
Example #15
void m_cljset(struct mbuf *m, void *cl, int type)
{
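	/*
	 * Attach the caller-supplied cluster as external storage.  No
	 * free routine is passed, so freeing the mbuf does not free the
	 * cluster; the type argument is accepted but unused here.
	 */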
	MEXTADD(m, cl, m->m_len, M_DEVBUF, NULL, NULL);
}
Example #16
/* Async. stream output */
static void
fwe_as_input(struct fw_xferq *xferq)
{
	struct mbuf *m;
	struct ether_header *eh;
	struct ifnet *ifp;
	struct fw_xfer *xfer;
	struct fwe_softc *fwe;
	u_char *c;
	int len;
	caddr_t p;

	fwe = (struct fwe_softc *)xferq->sc;
	ifp = &fwe->fwe_if;
#if 0
	FWE_POLL_REGISTER(fwe_poll, fwe, ifp);
#endif
	while ((xfer = STAILQ_FIRST(&xferq->q)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->q, link);
		xferq->queued--;
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("MGETHDR failed\n");
			fw_xfer_free(xfer);
			return;
		}
		len = xfer->recv.off + xfer->recv.len;
		FWEDEBUG("fwe_as_input len=%d\n", len);
#if __FreeBSD_version >= 500000
		MEXTADD(m, xfer->recv.buf, len, fwe_free, NULL, 0, EXT_NET_DRV);
#else
		m->m_flags |= M_EXT;
		m->m_ext.ext_buf = xfer->recv.buf;
		m->m_ext.ext_size = len;
		m->m_ext.ext_free = fwe_free;
		m->m_ext.ext_ref = fwe_ref;
		*((int *)m->m_ext.ext_buf) = 1;  /* XXX refcount */
#endif
		p = xfer->recv.buf + xfer->recv.off + HDR_LEN + ALIGN_PAD;
		eh = (struct ether_header *)p;
#if __FreeBSD_version >= 500000
		len -= xfer->recv.off + HDR_LEN + ALIGN_PAD;
#else
		p += sizeof(struct ether_header);
		len -= xfer->recv.off + HDR_LEN + ALIGN_PAD
						+ sizeof(struct ether_header);
#endif
		m->m_data = p;
		m->m_len = m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		c = (char *)eh;
#if 0
		FWEDEBUG("%02x %02x %02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x\n",
			 c[0], c[1], c[2], c[3], c[4], c[5],
			 c[6], c[7], c[8], c[9], c[10], c[11],
			 c[12], c[13], c[14], c[15],
			 c[16], c[17], c[18], c[19],
			 c[20], c[21], c[22], c[23],
			 c[20], c[21], c[22], c[23]
		 );
#endif
#if __FreeBSD_version >= 500000
		(*ifp->if_input)(ifp, m);
#else
		ether_input(ifp, eh, m);
#endif
		ifp->if_ipackets++;

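		/*
		 * The mbuf now owns recv.buf (released via fwe_free), so
		 * detach it before freeing the xfer.
		 */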
		xfer->recv.buf = NULL;
		fw_xfer_free(xfer);
	}
}
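Taken together, the examples share one pattern regardless of which MEXTADD variant a platform provides: allocate a header mbuf, attach the external buffer, then finalize the data and packet lengths. Below is a minimal sketch of that pattern, assuming the five-argument Xvisor-style MEXTADD(m, buf, size, free, arg) used in Examples #1, #5 and #9; my_extfree() and attach_ext_buf() are hypothetical names, not taken from any of the sources above.

static void my_extfree(struct vmm_mbuf *m, void *buf, u32 size, void *arg)
{
	/* Hypothetical callback: return the external buffer to the heap. */
	vmm_free(buf);
}

static struct vmm_mbuf *attach_ext_buf(void *buf, u32 len)
{
	struct vmm_mbuf *m;

	MGETHDR(m, 0, 0);
	if (!m) {
		return NULL;
	}

	/* Attach buf as external storage; my_extfree runs on last free. */
	MEXTADD(m, buf, len, my_extfree, NULL);

	/* As in the examples above, set the lengths after attaching. */
	m->m_len = m->m_pktlen = len;

	return m;
}

On the BSD-derived stacks the macro takes additional arguments (a malloc type on NetBSD as in Example #7; flags and an external-storage type on FreeBSD and OpenBSD as in Examples #2, #3 and #11), and the attach can fail, so those callers also check m->m_flags for M_EXT before using the mbuf.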