Example #1
static void
ntb_start(struct ifnet *ifp)
{
	struct mbuf *m_head;
	struct ntb_netdev *nt = ifp->if_softc;
	int rc;

	mtx_lock(&nt->tx_lock);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	CTR0(KTR_NTB, "TX: ntb_start");
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		CTR1(KTR_NTB, "TX: start mbuf %p", m_head);
		rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head,
			     m_length(m_head, NULL));
		if (rc != 0) {
			CTR1(KTR_NTB,
			    "TX: could not tx mbuf %p. Returning to snd q",
			    m_head);
			if (rc == EAGAIN) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				callout_reset(&nt->qp->queue_full, hz / 1000,
				    ntb_qp_full, ifp);
			}
			break;
		}

	}
	mtx_unlock(&nt->tx_lock);
}
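All of the examples in this section are built around the same drain loop: dequeue from if_snd with IFQ_DRV_DEQUEUE(), try to hand the packet to the hardware, and on failure put it back with IFQ_DRV_PREPEND() and set IFF_DRV_OACTIVE so the stack stops calling the start routine until a TX completion clears the flag. Below is a minimal sketch of that shared pattern, assuming a hypothetical driver with a mydrv_softc and a mydrv_encap() helper that returns 0 on success and an errno value when the TX ring is full:

static void
mydrv_start_locked(struct ifnet *ifp)
{
	struct mydrv_softc *sc = ifp->if_softc;	/* hypothetical softc */
	struct mbuf *m;

	MYDRV_ASSERT_LOCKED(sc);		/* hypothetical lock assert */

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		/* Take the next packet off the software send queue. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Try to hand the packet to the hardware (hypothetical helper). */
		if (mydrv_encap(sc, &m) != 0) {
			/*
			 * No room: requeue the packet at the head and tell
			 * the stack to stop feeding us until the TX
			 * interrupt handler clears IFF_DRV_OACTIVE.
			 */
			if (m != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Let any BPF listener see the outgoing frame. */
		ETHER_BPF_MTAP(ifp, m);
	}
}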
Example #2
static void
ffec_txstart_locked(struct ffec_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m;
	int enqueued;

	FFEC_ASSERT_LOCKED(sc);

	if (!sc->link_is_up)
		return;

	ifp = sc->ifp;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	enqueued = 0;

	for (;;) {
		if (sc->txcount == (TX_DESC_COUNT-1)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (ffec_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		BPF_MTAP(ifp, m);
		sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head);
		++enqueued;
	}

	if (enqueued != 0) {
		bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_PREWRITE);
		WR4(sc, FEC_TDAR_REG, FEC_TDAR_TDAR);
		bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTWRITE);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}
Example #3
static void
if_pcap_send(void *arg)
{
	struct mbuf *m;
	struct if_pcap_softc *sc = (struct if_pcap_softc *)arg;
	struct ifnet *ifp = sc->ifp;
	uint8_t copybuf[2048];
	uint8_t *pkt;
	unsigned int pktlen;

	if (sc->uif->cpu >= 0)
		sched_bind(sc->tx_thread, sc->uif->cpu);

	while (1) {
		mtx_lock(&sc->tx_lock);
		while (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			mtx_sleep(&ifp->if_drv_flags, &sc->tx_lock, 0, "wtxlk", 0);
		}
		mtx_unlock(&sc->tx_lock);
	
		while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			pktlen = m_length(m, NULL);

			ifp->if_opackets++;

			if (!sc->isfile && (pktlen <= sizeof(copybuf))) {			
				if (NULL == m->m_next) {
					/* all in one piece - avoid copy */
					pkt = mtod(m, uint8_t *);
					ifp->if_ozcopies++;
				} else {
					pkt = copybuf;
					m_copydata(m, 0, pktlen, pkt);
					ifp->if_ocopies++;
				}

				if (0 != if_pcap_sendpacket(sc->pcap_host_ctx, pkt, pktlen))
					ifp->if_oerrors++;
			} else {
				if (sc->isfile)
Example #4
static void
kr_start_locked(struct ifnet *ifp)
{
	struct kr_softc		*sc;
	struct mbuf		*m_head;
	int			enq;

	sc = ifp->if_softc;

	KR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->kr_link_status == 0 )
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->kr_cdata.kr_tx_cnt < KR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (kr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}
Example #5
/**
 * @group dTSEC IFnet routines.
 * @{
 */
void
dtsec_im_if_start_locked(struct dtsec_softc *sc)
{
    uint8_t *buffer;
    uint16_t length;
    struct mbuf *m;
    int error;

    DTSEC_LOCK_ASSERT(sc);
    /* TODO: IFF_DRV_OACTIVE */

    if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) == 0)
        return;

    if ((sc->sc_ifnet->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
        return;

    while (!IFQ_DRV_IS_EMPTY(&sc->sc_ifnet->if_snd)) {
        IFQ_DRV_DEQUEUE(&sc->sc_ifnet->if_snd, m);
        if (m == NULL)
            break;

        length = m_length(m, NULL);
        buffer = XX_MallocSmart(length, 0, sizeof(void *));
        if (!buffer) {
            m_freem(m);
            break;
        }

        m_copydata(m, 0, length, buffer);
        m_freem(m);

        error = FM_PORT_ImTx(sc->sc_txph, buffer, length, TRUE, buffer);
        if (error != E_OK) {
            /* TODO: Ring full */
            XX_FreeSmart(buffer);
            break;
        }
    }
}
Example #6
static void
if_netmap_send(void *arg)
{
	struct mbuf *m;
	struct if_netmap_softc *sc = (struct if_netmap_softc *)arg;
	struct ifnet *ifp = sc->ifp;
	struct uhi_pollfd pfd;
	uint32_t avail;
	uint32_t cur;
	u_int pktlen;
	int rv;
	int done;
	int pkts_sent;

	if (sc->cfg->cpu >= 0)
		sched_bind(sc->tx_thread.thr, sc->cfg->cpu);

	rv = if_netmap_txsync(sc->nm_host_ctx, NULL, NULL);
	if (rv == -1) {
		printf("could not sync tx descriptors before transmit\n");
	}

	avail = if_netmap_txavail(sc->nm_host_ctx);

	sc->tx_thread.last_stop_check = ticks;
	done = 0;
	pkts_sent = 0;
	do {
		mtx_lock(&sc->tx_lock);
		sc->tx_pkts_to_send -= pkts_sent;
		while ((sc->tx_pkts_to_send == 0) && !done)
			if (EWOULDBLOCK == cv_timedwait(&sc->tx_cv, &sc->tx_lock, sc->stop_check_ticks))
				done = if_netmap_stoppable_thread_check(&sc->tx_thread);
		mtx_unlock(&sc->tx_lock);
	
		if (done)
			break;

		pkts_sent = 0;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		while (m) {
			while (0 == avail && !done) {
				memset(&pfd, 0, sizeof(pfd));

				pfd.fd = sc->fd;
				pfd.events = UHI_POLLOUT;
				
				rv = uhi_poll(&pfd, 1, IF_NETMAP_THREAD_STOP_CHECK_MS);
				if (rv == 0)
					done = if_netmap_stoppable_thread_check(&sc->tx_thread);	
				else if (rv == -1)
					printf("error from poll for transmit\n");
					
				avail = if_netmap_txavail(sc->nm_host_ctx);
			}

			if (ticks - sc->tx_thread.last_stop_check >= sc->stop_check_ticks)
				done = if_netmap_stoppable_thread_check(&sc->tx_thread);

			if (done)
				break;

			cur = if_netmap_txcur(sc->nm_host_ctx);

			while (m && avail) {
				ifp->if_ocopies++;
				ifp->if_opackets++;

				avail--;
				pkts_sent++;

				pktlen = m_length(m, NULL);

				m_copydata(m, 0, pktlen,
					   if_netmap_txslot(sc->nm_host_ctx, &cur, pktlen)); 
				m_freem(m);

				IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			}

			rv = if_netmap_txsync(sc->nm_host_ctx, &avail, &cur);
			if (rv == -1) {
				printf("could not sync tx descriptors after transmit\n");
			}
			avail = if_netmap_txavail(sc->nm_host_ctx);
		}

	} while (!done);

	if_netmap_stoppable_thread_done(&sc->tx_thread);
}
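Examples #3 and #6 take a different approach: a dedicated TX thread drains if_snd, sleeping while the queue is empty. The if_start routine that pairs with such a thread is not shown above; in that arrangement it only has to wake the thread. A minimal sketch, assuming a hypothetical mydrv_softc with the same tx_lock and sleep channel used in Example #3:

static void
mydrv_if_start(struct ifnet *ifp)
{
	struct mydrv_softc *sc = ifp->if_softc;	/* hypothetical softc */

	/*
	 * The stack has already placed the packet on ifp->if_snd, so all
	 * this routine does is wake the TX thread, which sleeps on
	 * &ifp->if_drv_flags whenever it finds the queue empty.
	 */
	mtx_lock(&sc->tx_lock);
	wakeup(&ifp->if_drv_flags);
	mtx_unlock(&sc->tx_lock);
}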
Example #7
static void
smc_start_locked(struct ifnet *ifp)
{
	struct smc_softc	*sc;
	struct mbuf		*m;
	u_int			len, npages, spin_count;

	sc = ifp->if_softc;
	SMC_ASSERT_LOCKED(sc);

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * Grab the next packet.  If it's too big, drop it.
	 */
	IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
	len = m_length(m, NULL);
	len += (len & 1);
	if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		if_printf(ifp, "large packet discarded\n");
		++ifp->if_oerrors;
		m_freem(m);
		return; /* XXX readcheck? */
	}

	/*
	 * Flag that we're busy.
	 */
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	sc->smc_pending = m;

	/*
	 * Work out how many 256 byte "pages" we need.  We have to include the
	 * control data for the packet in this calculation.
	 */
	npages = (len + PKT_CTRL_DATA_LEN) >> 8;
	if (npages == 0)
		npages = 1;

	/*
	 * Request memory.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages);

	/*
	 * Spin briefly to see if the allocation succeeds.
	 */
	spin_count = TX_ALLOC_WAIT_TIME;
	do {
		if (smc_read_1(sc, IST) & ALLOC_INT) {
			smc_write_1(sc, ACK, ALLOC_INT);
			break;
		}
	} while (--spin_count);

	/*
	 * If the allocation is taking too long, unmask the alloc interrupt
	 * and wait.
	 */
	if (spin_count == 0) {
		sc->smc_mask |= ALLOC_INT;
		if ((ifp->if_capenable & IFCAP_POLLING) == 0)
			smc_write_1(sc, MSK, sc->smc_mask);
		return;
	}

	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
}
Example #8
static void
usie_if_tx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usie_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc;
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	uint16_t size;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete\n");
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ifp->if_opackets++;

		/* fall through */
	case USB_ST_SETUP:
tr_setup:

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (m->m_pkthdr.len > (MCLBYTES - ETHER_HDR_LEN +
		    ETHER_CRC_LEN - sizeof(sc->sc_txd))) {
			DPRINTF("packet len is too big: %d\n",
			    m->m_pkthdr.len);
			break;
		}
		pc = usbd_xfer_get_frame(xfer, 0);

		sc->sc_txd.hip.len = htobe16(m->m_pkthdr.len +
		    ETHER_HDR_LEN + ETHER_CRC_LEN);
		size = sizeof(sc->sc_txd);

		usbd_copy_in(pc, 0, &sc->sc_txd, size);
		usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len);
		usbd_xfer_set_frame_len(xfer, 0, m->m_pkthdr.len +
		    size + ETHER_CRC_LEN);

		BPF_MTAP(ifp, m);

		m_freem(m);

		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("USB transfer error, %s\n",
		    usbd_errstr(error));
		ifp->if_oerrors++;

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			ifp->if_ierrors++;
			goto tr_setup;
		}
		break;
	}
}
Example #9
static void
cdce_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct mbuf *m;
	struct mbuf *mt;
	uint32_t crc;
	uint8_t x;
	int actlen, aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTFN(1, "\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
		    actlen, aframes);

		ifp->if_opackets++;

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		for (x = 0; x != CDCE_FRAMES_MAX; x++) {

			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

			if (m == NULL)
				break;

			if (sc->sc_flags & CDCE_FLAG_ZAURUS) {
				/*
				 * Zaurus wants a 32-bit CRC appended
				 * to every frame
				 */

				crc = cdce_m_crc32(m, 0, m->m_pkthdr.len);
				crc = htole32(crc);

				if (!m_append(m, 4, (void *)&crc)) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
			}
			if (m->m_len != m->m_pkthdr.len) {
				mt = m_defrag(m, M_DONTWAIT);
				if (mt == NULL) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
				m = mt;
			}
			if (m->m_pkthdr.len > MCLBYTES) {
				m->m_pkthdr.len = MCLBYTES;
			}
			sc->sc_tx_buf[x] = m;
			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him:
			 */
			BPF_MTAP(ifp, m);
		}
		if (x != 0) {
			usbd_xfer_set_frames(xfer, x);

			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n",
		    usbd_errstr(error));

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* count output errors */
		ifp->if_oerrors++;

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
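Examples #8, #9, and #12 drain if_snd from inside a USB transfer callback rather than a start routine: on USB_ST_TRANSFERRED the previous transfer's mbufs are freed and the code falls through to USB_ST_SETUP, where up to a fixed number of frames are dequeued and attached to the next transfer. A stripped-down sketch of that state machine, assuming a hypothetical mydrv_softc with an sc_tx_buf[] array, an MYDRV_FRAMES_MAX limit, and a mydrv_free_queue() helper:

static void
mydrv_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct mydrv_softc *sc = usbd_xfer_softc(xfer);	/* hypothetical */
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct mbuf *m;
	uint8_t x;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* The previous transfer finished; release its mbufs. */
		mydrv_free_queue(sc->sc_tx_buf, MYDRV_FRAMES_MAX);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		/* Pull as many packets as fit into one multi-frame transfer. */
		for (x = 0; x != MYDRV_FRAMES_MAX; x++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			/*
			 * A real driver would defragment or copy the mbuf
			 * chain here (compare Example #9) before pointing
			 * the frame at its data.
			 */
			sc->sc_tx_buf[x] = m;
			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
			BPF_MTAP(ifp, m);
		}
		if (x != 0) {
			usbd_xfer_set_frames(xfer, x);
			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		mydrv_free_queue(sc->sc_tx_buf, MYDRV_FRAMES_MAX);
		if (error != USB_ERR_CANCELLED) {
			/* Try to clear the stall, then retry setup. */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}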
Example #10
static uint8_t
cdce_ncm_fill_tx_frames(struct usb_xfer *xfer, uint8_t index)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct usb_page_cache *pc = usbd_xfer_get_frame(xfer, index);
	struct mbuf *m;
	uint32_t rem;
	uint32_t offset;
	uint32_t last_offset;
	uint16_t n;
	uint8_t retval;

	usbd_xfer_set_frame_offset(xfer, index * CDCE_NCM_TX_MAXLEN, index);

	offset = sizeof(sc->sc_ncm.hdr) +
	    sizeof(sc->sc_ncm.dpt) + sizeof(sc->sc_ncm.dp);

	/* Store last valid offset before alignment */
	last_offset = offset;

	/* Align offset */
	offset = CDCE_NCM_ALIGN(sc->sc_ncm.tx_remainder,
	    offset, sc->sc_ncm.tx_modulus);

	/* Zero pad */
	cdce_ncm_tx_zero(pc, last_offset, offset);

	/* buffer full */
	retval = 2;

	for (n = 0; n != sc->sc_ncm.tx_nframe; n++) {

		/* check if end of transmit buffer is reached */

		if (offset >= sc->sc_ncm.tx_max)
			break;

		/* compute maximum buffer size */

		rem = sc->sc_ncm.tx_max - offset;

		IFQ_DRV_DEQUEUE(&(ifp->if_snd), m);

		if (m == NULL) {
			/* buffer not full */
			retval = 1;
			break;
		}

		if (m->m_pkthdr.len > rem) {
			if (n == 0) {
				/* The frame won't fit in our buffer */
				DPRINTFN(1, "Frame too big to be transmitted!\n");
				m_freem(m);
				ifp->if_oerrors++;
				n--;
				continue;
			}
			/* Wait till next buffer becomes ready */
			IFQ_DRV_PREPEND(&(ifp->if_snd), m);
			break;
		}
		usbd_m_copy_in(pc, offset, m, 0, m->m_pkthdr.len);

		USETW(sc->sc_ncm.dp[n].wFrameLength, m->m_pkthdr.len);
		USETW(sc->sc_ncm.dp[n].wFrameIndex, offset);

		/* Update offset */
		offset += m->m_pkthdr.len;

		/* Store last valid offset before alignment */
		last_offset = offset;

		/* Align offset */
		offset = CDCE_NCM_ALIGN(sc->sc_ncm.tx_remainder,
		    offset, sc->sc_ncm.tx_modulus);

		/* Zero pad */
		cdce_ncm_tx_zero(pc, last_offset, offset);

		/*
		 * If there's a BPF listener, bounce a copy
		 * of this frame to him:
		 */
		BPF_MTAP(ifp, m);

		/* Free mbuf */

		m_freem(m);

		/* Pre-increment interface counter */

		ifp->if_opackets++;
	}

	if (n == 0)
		return (0);

	rem = (sizeof(sc->sc_ncm.dpt) + (4 * n) + 4);

	USETW(sc->sc_ncm.dpt.wLength, rem);

	/* zero the rest of the data pointer entries */
	for (; n != CDCE_NCM_SUBFRAMES_MAX; n++) {
		USETW(sc->sc_ncm.dp[n].wFrameLength, 0);
		USETW(sc->sc_ncm.dp[n].wFrameIndex, 0);
	}

	offset = last_offset;

	/* Align offset */
	offset = CDCE_NCM_ALIGN(0, offset, CDCE_NCM_TX_MINLEN);

	/* Optimise, save bandwidth and force short termination */
	if (offset >= sc->sc_ncm.tx_max)
		offset = sc->sc_ncm.tx_max;
	else
		offset ++;

	/* Zero pad */
	cdce_ncm_tx_zero(pc, last_offset, offset);

	/* set frame length */
	usbd_xfer_set_frame_len(xfer, index, offset);

	/* Fill out 16-bit header */
	sc->sc_ncm.hdr.dwSignature[0] = 'N';
	sc->sc_ncm.hdr.dwSignature[1] = 'C';
	sc->sc_ncm.hdr.dwSignature[2] = 'M';
	sc->sc_ncm.hdr.dwSignature[3] = 'H';
	USETW(sc->sc_ncm.hdr.wHeaderLength, sizeof(sc->sc_ncm.hdr));
	USETW(sc->sc_ncm.hdr.wBlockLength, offset);
	USETW(sc->sc_ncm.hdr.wSequence, sc->sc_ncm.tx_seq);
	USETW(sc->sc_ncm.hdr.wDptIndex, sizeof(sc->sc_ncm.hdr));

	sc->sc_ncm.tx_seq++;

	/* Fill out 16-bit frame table header */
	sc->sc_ncm.dpt.dwSignature[0] = 'N';
	sc->sc_ncm.dpt.dwSignature[1] = 'C';
	sc->sc_ncm.dpt.dwSignature[2] = 'M';
	sc->sc_ncm.dpt.dwSignature[3] = '0';
	USETW(sc->sc_ncm.dpt.wNextNdpIndex, 0);		/* reserved */

	usbd_copy_in(pc, 0, &(sc->sc_ncm.hdr), sizeof(sc->sc_ncm.hdr));
	usbd_copy_in(pc, sizeof(sc->sc_ncm.hdr), &(sc->sc_ncm.dpt),
	    sizeof(sc->sc_ncm.dpt));
	usbd_copy_in(pc, sizeof(sc->sc_ncm.hdr) + sizeof(sc->sc_ncm.dpt),
	    &(sc->sc_ncm.dp), sizeof(sc->sc_ncm.dp));
	return (retval);
}
Example #11
/**
 * @group dTSEC IFnet routines.
 * @{
 */
void
dtsec_rm_if_start_locked(struct dtsec_softc *sc)
{
	vm_size_t dsize, psize, ssize;
	struct dtsec_rm_frame_info *fi;
	unsigned int qlen, i;
	struct mbuf *m0, *m;
	vm_offset_t vaddr;
	vm_paddr_t paddr;
	t_DpaaFD fd;

	DTSEC_LOCK_ASSERT(sc);
	/* TODO: IFF_DRV_OACTIVE */

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) == 0)
		return;

	if ((sc->sc_ifnet->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

	while (!IFQ_DRV_IS_EMPTY(&sc->sc_ifnet->if_snd)) {
		/* Check length of the TX queue */
		qlen = qman_fqr_get_counter(sc->sc_tx_fqr, 0,
		    e_QM_FQR_COUNTERS_FRAME);

		if (qlen >= DTSEC_MAX_TX_QUEUE_LEN) {
			sc->sc_tx_fqr_full = 1;
			return;
		}

		fi = dtsec_rm_fi_alloc(sc);
		if (fi == NULL)
			return;

		IFQ_DRV_DEQUEUE(&sc->sc_ifnet->if_snd, m0);
		if (m0 == NULL) {
			dtsec_rm_fi_free(sc, fi);
			return;
		}

		i = 0;
		m = m0;
		psize = 0;
		dsize = 0;
		fi->fi_mbuf = m0;
		while (m && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
			if (m->m_len == 0)
				continue;

			/*
			 * First entry in scatter-gather table is used to keep
			 * pointer to frame info structure.
			 */
			DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i], (void *)fi);
			DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], 0);

			DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0);
			DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0);
			DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0);
			DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0);
			i++;

			dsize = m->m_len;
			vaddr = (vm_offset_t)m->m_data;
			while (dsize > 0 && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
				paddr = XX_VirtToPhys((void *)vaddr);
				ssize = PAGE_SIZE - (paddr & PAGE_MASK);
				if (m->m_len < ssize)
					ssize = m->m_len;

				DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i],
				    (void *)vaddr);
				DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], ssize);

				DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0);
				DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0);
				DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0);
				DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0);

				dsize -= ssize;
				vaddr += ssize;
				psize += ssize;
				i++;
			}

			if (dsize > 0)
				break;

			m = m->m_next;
		}

		/* Check if SG table was constructed properly */
		if (m != NULL || dsize != 0) {
			dtsec_rm_fi_free(sc, fi);
			m_freem(m0);
			continue;
		}

		DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i-1], 1);

		DPAA_FD_SET_ADDR(&fd, fi->fi_sgt);
		DPAA_FD_SET_LENGTH(&fd, psize);
		DPAA_FD_SET_FORMAT(&fd, e_DPAA_FD_FORMAT_TYPE_SHORT_MBSF);

		DPAA_FD_SET_DD(&fd, 0);
		DPAA_FD_SET_PID(&fd, 0);
		DPAA_FD_SET_BPID(&fd, 0);
		DPAA_FD_SET_OFFSET(&fd, 0);
		DPAA_FD_SET_STATUS(&fd, 0);

		DTSEC_UNLOCK(sc);
		if (qman_fqr_enqueue(sc->sc_tx_fqr, 0, &fd) != E_OK) {
			dtsec_rm_fi_free(sc, fi);
			m_freem(m0);
		}
		DTSEC_LOCK(sc);
	}
}
Example #12
static void
ipheth_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ipheth_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct usb_page_cache *pc;
	struct mbuf *m;
	uint8_t x;
	int actlen;
	int aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTFN(1, "\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
		    actlen, aframes);

		ifp->if_opackets++;

		/* free all previous TX buffers */
		ipheth_free_queue(sc->sc_tx_buf, IPHETH_TX_FRAMES_MAX);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		for (x = 0; x != IPHETH_TX_FRAMES_MAX; x++) {

			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

			if (m == NULL)
				break;

			usbd_xfer_set_frame_offset(xfer,
			    x * IPHETH_BUF_SIZE, x);

			pc = usbd_xfer_get_frame(xfer, x);

			sc->sc_tx_buf[x] = m;

			if (m->m_pkthdr.len > IPHETH_BUF_SIZE)
				m->m_pkthdr.len = IPHETH_BUF_SIZE;

			usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len);

			usbd_xfer_set_frame_len(xfer, x, IPHETH_BUF_SIZE);

			if (IPHETH_BUF_SIZE != m->m_pkthdr.len) {
				usbd_frame_zero(pc, m->m_pkthdr.len,
					IPHETH_BUF_SIZE - m->m_pkthdr.len);
			}

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him:
			 */
			BPF_MTAP(ifp, m);
		}
		if (x != 0) {
			usbd_xfer_set_frames(xfer, x);

			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n",
		    usbd_errstr(error));

		/* free all previous TX buffers */
		ipheth_free_queue(sc->sc_tx_buf, IPHETH_TX_FRAMES_MAX);

		/* count output errors */
		ifp->if_oerrors++;

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
Example #13
static int
hn_start_locked(struct ifnet *ifp)
{
	int ret = 0;
	hn_softc_t *sc = ifp->if_softc;
	NETVSC_DRIVER_OBJECT *net_drv_obj = &g_netvsc_drv.drv_obj;
	struct device_context *device_ctx = vmbus_get_devctx(sc->hn_dev);

	int i = 0;
	unsigned char *buf;

	NETVSC_PACKET* packet;
	int num_frags = 0;
	int retries = 0;
	struct mbuf *m_head, *m;
	int len = 0;
	int xlen = 0;

	DPRINT_ENTER(NETVSC_DRV);

	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
		if (m_head == NULL) {
			break;
		}

		len = 0;
		num_frags = 0;
		xlen = 0;

		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				num_frags++;
				len += m->m_len;
			}
		}

		DPRINT_DBG(NETVSC_DRV, "xmit packet - len %d", len);

		// Add 1 for skb->data and any additional ones requested
		num_frags += net_drv_obj->AdditionalRequestPageBufferCount;

		// Allocate a netvsc packet based on # of frags.
		buf = malloc(16 + sizeof(NETVSC_PACKET) + 
		    (num_frags * sizeof(PAGE_BUFFER)) + 
		    net_drv_obj->RequestExtSize, 
		    M_DEVBUF, M_ZERO | M_WAITOK);

		if (buf == NULL) {
			DPRINT_ERR(NETVSC_DRV, "unable to allocate NETVSC_PACKET");
			return -1;
		}

		packet = (NETVSC_PACKET *)(buf + 16);
		*(vm_offset_t *)buf = 0;

		packet->Extension = (void*)((unsigned long)packet + 
		    sizeof(NETVSC_PACKET) + (num_frags * sizeof(PAGE_BUFFER))) ;

		// Setup the rndis header
		packet->PageBufferCount = num_frags;

		// TODO: Flush all write buffers/ memory fence ???
		//wmb();
	
		// Initialize it from the mbuf
		packet->TotalDataBufferLength	= len;

		// Start filling in the page buffers starting at
		// AdditionalRequestPageBufferCount offset

		i = net_drv_obj->AdditionalRequestPageBufferCount;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len) {
				vm_offset_t paddr = vtophys(mtod(m, vm_offset_t));
				packet->PageBuffers[i].Pfn = paddr >> PAGE_SHIFT;
				packet->PageBuffers[i].Offset = paddr & (PAGE_SIZE - 1);
				packet->PageBuffers[i].Length = m->m_len;
				DPRINT_DBG(NETVSC_DRV, 
						"vaddr: %p, pfn: %llx, Off: %x, len: %x\n", 
						paddr, packet->PageBuffers[i].Pfn, 
						packet->PageBuffers[i].Offset, 
						packet->PageBuffers[i].Length);

				i++;
			}
		}


		// Set the completion routine
		/*
		 * Fixme:  Research the netvsc_xmit_completion() function
		 * and figure out what to do about it.  It is currently too
		 * messed up to port easily.
		 */
		packet->Completion.Send.OnSendCompletion = netvsc_xmit_completion;
		packet->Completion.Send.SendCompletionContext = packet;
		packet->Completion.Send.SendCompletionTid = (ULONG_PTR)m_head;
retry_send:
		critical_enter();
		ret = net_drv_obj->OnSend(&device_ctx->device_obj, packet);
		critical_exit();

		if (ret == 0) {
			ifp->if_opackets++;
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m_head);
//			if (ifp->if_timer == 0)
//				ifp->if_timer = 5;
		} else {
			retries++;
			if (retries < 4) {
				DPRINT_ERR(NETVSC_DRV,
				    "unable to send...retrying %d...", retries);
				goto retry_send;
			}

			DPRINT_INFO(NETVSC_DRV, "net device (%p) stopping", sc);
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

			ret = -1;
//			net_device_ctx->stats.tx_dropped++;

			// Null it since the caller will free it instead of
			// the completion routine
			packet->Completion.Send.SendCompletionTid = 0;

			// Release the resources since we will not get any
			// send completion
			netvsc_xmit_completion((void*)packet);
		}
	}
Example #14
File: if_ed.c  Project: ChaosJohn/freebsd
static void
ed_start_locked(struct ifnet *ifp)
{
	struct ed_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	bus_size_t buffer;
	int     len;

	ED_ASSERT_LOCKED(sc);
outloop:

	/*
	 * First, see if there are buffered packets and an idle transmitter -
	 * should never happen at this point.
	 */
	if (sc->txb_inuse && (sc->xmit_busy == 0)) {
		printf("ed: packets buffered, but transmitter idle\n");
		ed_xmit(sc);
	}

	/*
	 * See if there is room to put another packet in the buffer.
	 */
	if (sc->txb_inuse == sc->txb_cnt) {

		/*
		 * No room. Indicate this to the outside world and exit.
		 */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		return;
	}
	IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
	if (m == 0) {

		/*
		 * We are using the !OACTIVE flag to indicate to the outside
		 * world that we can accept an additional packet rather than
		 * that the transmitter is _actually_ active. Indeed, the
		 * transmitter may be active, but if we haven't filled all the
		 * buffers with data then we still want to accept more.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		return;
	}

	/*
	 * Copy the mbuf chain into the transmit buffer
	 */
	m0 = m;

	/* txb_new points to next open buffer slot */
	buffer = sc->mem_start + (sc->txb_new * ED_TXBUF_SIZE * ED_PAGE_SIZE);

	len = sc->sc_write_mbufs(sc, m, buffer);
	if (len == 0) {
		m_freem(m0);
		goto outloop;
	}

	sc->txb_len[sc->txb_new] = max(len, (ETHER_MIN_LEN-ETHER_CRC_LEN));

	sc->txb_inuse++;

	/*
	 * Point to next buffer slot and wrap if necessary.
	 */
	sc->txb_new++;
	if (sc->txb_new == sc->txb_cnt)
		sc->txb_new = 0;

	if (sc->xmit_busy == 0)
		ed_xmit(sc);

	/*
	 * Tap off here if there is a bpf listener.
	 */
	BPF_MTAP(ifp, m0);

	m_freem(m0);

	/*
	 * Loop back to the top to possibly buffer more packets
	 */
	goto outloop;
}
Example #15
File: if_vtbe.c  Project: jashank/freebsd
static void
vtbe_txstart_locked(struct vtbe_softc *sc)
{
	struct virtio_net_hdr_mrg_rxbuf *vnh;
	struct iovec iov[DESC_COUNT];
	struct vqueue_info *vq;
	struct iovec *riov;
	struct ifnet *ifp;
	struct mbuf *m;
	struct uio uio;
	int enqueued;
	int iolen;
	int error;
	int *addr;
	int reg;
	int len;
	int n;

	VTBE_ASSERT_LOCKED(sc);

	/* RX queue */
	vq = &sc->vs_queues[0];
	if (!vq_has_descs(vq)) {
		return;
	}

	ifp = sc->ifp;
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		return;
	}

	enqueued = 0;

	if (!vq_ring_ready(vq))
		return;

	vq->vq_save_used = be16toh(vq->vq_used->idx);

	for (;;) {
		if (!vq_has_descs(vq)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			break;
		}

		n = vq_getchain(sc->beri_mem_offset, vq, iov,
			DESC_COUNT, NULL);

		KASSERT(n >= 1 && n <= DESC_COUNT,
			("wrong descriptors num %d", n));

		addr = iov[0].iov_base;
		len = iov[0].iov_len;

		vnh = iov[0].iov_base;
		memset(vnh, 0, sc->hdrsize);
		vnh->num_buffers = htobe16(1);

		iov[0].iov_len -= sc->hdrsize;
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base +
					sc->hdrsize);
		riov = &iov[0];

		uio.uio_resid = iov[0].iov_len;
		uio.uio_iov = riov;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;

		error = m_mbuftouio(&uio, m, 0);
		if (error)
			panic("m_mbuftouio failed\n");

		iolen = (len - iov[0].iov_len - sc->hdrsize);
		vq_relchain(vq, iov, 0, iolen + sc->hdrsize);
		paddr_unmap((void *)addr, len);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		BPF_MTAP(ifp, m);
		m_freem(m);

		++enqueued;
	}

	if (enqueued != 0) {
		reg = htobe32(VIRTIO_MMIO_INT_VRING);
		WRITE4(sc, VIRTIO_MMIO_INTERRUPT_STATUS, reg);

		PIO_SET(sc->pio_send, Q_INTR, 1);
	}
}
Example #16
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct ifnet *ifp)
{
	hn_softc_t *sc = ifp->if_softc;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
	uint8_t *buf;
	netvsc_packet *packet;
	struct mbuf *m_head, *m;
	struct mbuf *mc_head = NULL;
	int i;
	int num_frags;
	int len;
	int xlen;
	int rppi_size;
	int retries = 0;
	int ret = 0;

	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
		if (m_head == NULL) {
			break;
		}

		len = 0;
		num_frags = 0;
		xlen = 0;

		/* Walk the mbuf list computing total length and num frags */
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				num_frags++;
				len += m->m_len;
			}
		}

		/*
		 * Reserve the number of pages requested.  Currently,
		 * one page is reserved for the message in the RNDIS
		 * filter packet
		 */
		num_frags += HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

		/* If exceeds # page_buffers in netvsc_packet */
		if (num_frags > NETVSC_PACKET_MAXPAGE) {
			m_freem(m_head);

			return (EINVAL);
		}

		rppi_size = 0;
		if (m_head->m_flags & M_VLANTAG) {
			rppi_size = sizeof(rndis_per_packet_info) + 
			    sizeof(ndis_8021q_info);
		}

		/*
		 * Allocate a buffer with space for a netvsc packet plus a
		 * number of reserved areas.  First comes a (currently 16
		 * bytes, currently unused) reserved data area.  Second is
		 * the netvsc_packet, which includes (currently 4) page
		 * buffers.  Third (optional) is a rndis_per_packet_info
		 * struct, but only if a VLAN tag should be inserted into the
		 * Ethernet frame by the Hyper-V infrastructure.  Fourth is
		 * an area reserved for an rndis_filter_packet struct.
		 * Changed malloc to M_NOWAIT to avoid sleep under spin lock.
		 * No longer reserving extra space for page buffers, as they
		 * are already part of the netvsc_packet.
		 */
		buf = malloc(HV_NV_PACKET_OFFSET_IN_BUF +
		    sizeof(netvsc_packet) + rppi_size +
		    sizeof(rndis_filter_packet),
		    M_DEVBUF, M_ZERO | M_NOWAIT);
		if (buf == NULL) {
			m_freem(m_head);

			return (ENOMEM);
		}

		packet = (netvsc_packet *)(buf + HV_NV_PACKET_OFFSET_IN_BUF);
		*(vm_offset_t *)buf = HV_NV_SC_PTR_OFFSET_IN_BUF;

		/*
		 * extension points to the area reserved for the
		 * rndis_filter_packet, which is placed just after
		 * the netvsc_packet (and rppi struct, if present;
		 * length is updated later).
		 */
		packet->extension = packet + 1;

		/* Set up the rndis header */
		packet->page_buf_count = num_frags;

		/* Initialize it from the mbuf */
		packet->tot_data_buf_len = len;

		/*
		 * If the Hyper-V infrastructure needs to embed a VLAN tag,
		 * initialize netvsc_packet and rppi struct values as needed.
		 */
		if (rppi_size) {
			/* Lower layers need the VLAN TCI */
			packet->vlan_tci = m_head->m_pkthdr.ether_vtag;
		}

		/*
		 * Fill the page buffers with mbuf info starting at index
		 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
		 */
		i = HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len) {
				vm_offset_t paddr =
				    vtophys(mtod(m, vm_offset_t));
				packet->page_buffers[i].pfn =
				    paddr >> PAGE_SHIFT;
				packet->page_buffers[i].offset =
				    paddr & (PAGE_SIZE - 1);
				packet->page_buffers[i].length = m->m_len;
				i++;
			}
		}

		/*
		 * If bpf, copy the mbuf chain.  This is less expensive than
		 * it appears; the mbuf clusters are not copied, only their
		 * reference counts are incremented.
		 * Needed to avoid a race condition where the completion
		 * callback is invoked, freeing the mbuf chain, before the
		 * bpf_mtap code has a chance to run.
		 */
		if (ifp->if_bpf) {
			mc_head = m_copypacket(m_head, M_DONTWAIT);
		}
retry_send:
		/* Set the completion routine */
		packet->compl.send.on_send_completion = netvsc_xmit_completion;
		packet->compl.send.send_completion_context = packet;
		packet->compl.send.send_completion_tid = (uint64_t)m_head;

		/* Removed critical_enter(), does not appear necessary */
		ret = hv_rf_on_send(device_ctx, packet);

		if (ret == 0) {
			ifp->if_opackets++;
			/* if bpf && mc_head, call bpf_mtap code */
			if (mc_head) {
				ETHER_BPF_MTAP(ifp, mc_head);
			}
		} else {
			retries++;
			if (retries < 4) {
				goto retry_send;
			}

			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

			/*
			 * Null the mbuf pointer so the completion function
			 * does not free the mbuf chain.  We just pushed the
			 * mbuf chain back on the if_snd queue.
			 */
			packet->compl.send.send_completion_tid = 0;

			/*
			 * Release the resources since we will not get any
			 * send completion
			 */
			netvsc_xmit_completion(packet);
		}

		/* if bpf && mc_head, free the mbuf chain copy */
		if (mc_head) {
			m_freem(mc_head);
		}
	}