Code Example #1
File: if_ntb.c Project: ele7enxxh/dtrace-pf
static void
ntb_start(struct ifnet *ifp)
{
	struct mbuf *m_head;
	struct ntb_netdev *nt = ifp->if_softc;
	int rc;

	mtx_lock(&nt->tx_lock);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	CTR0(KTR_NTB, "TX: ntb_start");
	/* Drain the send queue, handing each mbuf to the NTB transport. */
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		CTR1(KTR_NTB, "TX: start mbuf %p", m_head);
		rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head,
			     m_length(m_head, NULL));
		if (rc != 0) {
			CTR1(KTR_NTB,
			    "TX: could not tx mbuf %p. Returning to snd q",
			    m_head);
			if (rc == EAGAIN) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				callout_reset(&nt->qp->queue_full, hz / 1000,
				    ntb_qp_full, ifp);
			}
			break;
		}

	}
	mtx_unlock(&nt->tx_lock);
}
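
When the transport returns EAGAIN, Example #1 sets IFF_DRV_OACTIVE, pushes the mbuf back onto if_snd, and re-arms the queue pair's queue_full callout. A minimal sketch of such a callout handler, assuming it only needs to re-enter the start routine (illustrative; ntb_start() retakes tx_lock itself):

static void
ntb_qp_full(void *arg)
{
	struct ifnet *ifp = arg;

	/* The transport ring had no room earlier; try to drain if_snd again. */
	ntb_start(ifp);
}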
Code Example #2
File: uinet_if_pcap.c Project: bigclouds/libuinet
static void
if_pcap_send(void *arg)
{
	struct mbuf *m;
	struct if_pcap_softc *sc = (struct if_pcap_softc *)arg;
	struct ifnet *ifp = sc->ifp;
	uint8_t copybuf[2048];
	uint8_t *pkt;
	unsigned int pktlen;

	if (sc->uif->cpu >= 0)
		sched_bind(sc->tx_thread, sc->uif->cpu);

	while (1) {
		/* Block until the stack queues packets to if_snd. */
		mtx_lock(&sc->tx_lock);
		while (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			mtx_sleep(&ifp->if_drv_flags, &sc->tx_lock, 0, "wtxlk", 0);
		}
		mtx_unlock(&sc->tx_lock);
	
		while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			pktlen = m_length(m, NULL);

			ifp->if_opackets++;

			if (!sc->isfile && (pktlen <= sizeof(copybuf))) {			
				if (NULL == m->m_next) {
					/* all in one piece - avoid copy */
					pkt = mtod(m, uint8_t *);
					ifp->if_ozcopies++;
				} else {
					pkt = copybuf;
					m_copydata(m, 0, pktlen, pkt);
					ifp->if_ocopies++;
				}

				if (0 != if_pcap_sendpacket(sc->pcap_host_ctx, pkt, pktlen))
					ifp->if_oerrors++;
			} else {
				if (sc->isfile)
Code Example #3
File: if_kr.c Project: edgar-pek/PerspicuOS
static void
kr_start_locked(struct ifnet *ifp)
{
	struct kr_softc		*sc;
	struct mbuf		*m_head;
	int			enq;

	sc = ifp->if_softc;

	KR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->kr_link_status == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->kr_cdata.kr_tx_cnt < KR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (kr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}
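
kr_start_locked() asserts that the caller already holds the softc lock. The if_start handler registered with the ifnet is then conventionally a thin locking wrapper; a minimal sketch, assuming KR_LOCK/KR_UNLOCK macros accompany the KR_LOCK_ASSERT used above:

static void
kr_start(struct ifnet *ifp)
{
	struct kr_softc	*sc;

	sc = ifp->if_softc;
	KR_LOCK(sc);
	kr_start_locked(ifp);
	KR_UNLOCK(sc);
}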
Code Example #4
File: if_dtsec_im.c Project: tomtor/freebsd
/**
 * @group dTSEC IFnet routines.
 * @{
 */
void
dtsec_im_if_start_locked(struct dtsec_softc *sc)
{
    uint8_t *buffer;
    uint16_t length;
    struct mbuf *m;
    int error;

    DTSEC_LOCK_ASSERT(sc);
    /* TODO: IFF_DRV_OACTIVE */

    if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) == 0)
        return;

    if ((sc->sc_ifnet->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
        return;

    while (!IFQ_DRV_IS_EMPTY(&sc->sc_ifnet->if_snd)) {
        IFQ_DRV_DEQUEUE(&sc->sc_ifnet->if_snd, m);
        if (m == NULL)
            break;

        /* Linearize the chain into a flat buffer owned by the FMan port. */
        length = m_length(m, NULL);
        buffer = XX_MallocSmart(length, 0, sizeof(void *));
        if (!buffer) {
            m_freem(m);
            break;
        }

        m_copydata(m, 0, length, buffer);
        m_freem(m);

        error = FM_PORT_ImTx(sc->sc_txph, buffer, length, TRUE, buffer);
        if (error != E_OK) {
            /* TODO: Ring full */
            XX_FreeSmart(buffer);
            break;
        }
    }
}
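
FM_PORT_ImTx() is handed `buffer` a second time as a completion cookie, which implies the copy is freed from a transmit-confirmation callback once the frame has left the port. A minimal sketch of such a hook; the function name and exact signature are assumptions for illustration, not taken from the driver:

/* Hypothetical TX-confirmation hook: release the bounce buffer. */
static void
dtsec_im_tx_conf(t_Handle app, uint8_t *data, uint16_t length, t_Handle cookie)
{
    XX_FreeSmart(cookie);
}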
Code Example #5
File: if_dtsec_rm.c Project: 2asoft/freebsd
/**
 * @group dTSEC IFnet routines.
 * @{
 */
void
dtsec_rm_if_start_locked(struct dtsec_softc *sc)
{
	vm_size_t dsize, psize, ssize;
	struct dtsec_rm_frame_info *fi;
	unsigned int qlen, i;
	struct mbuf *m0, *m;
	vm_offset_t vaddr;
	vm_paddr_t paddr;
	t_DpaaFD fd;

	DTSEC_LOCK_ASSERT(sc);
	/* TODO: IFF_DRV_OACTIVE */

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) == 0)
		return;

	if ((sc->sc_ifnet->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

	while (!IFQ_DRV_IS_EMPTY(&sc->sc_ifnet->if_snd)) {
		/* Check length of the TX queue */
		qlen = qman_fqr_get_counter(sc->sc_tx_fqr, 0,
		    e_QM_FQR_COUNTERS_FRAME);

		if (qlen >= DTSEC_MAX_TX_QUEUE_LEN) {
			sc->sc_tx_fqr_full = 1;
			return;
		}

		fi = dtsec_rm_fi_alloc(sc);
		if (fi == NULL)
			return;

		IFQ_DRV_DEQUEUE(&sc->sc_ifnet->if_snd, m0);
		if (m0 == NULL) {
			dtsec_rm_fi_free(sc, fi);
			return;
		}

		/* Build the S/G table, splitting mbuf data at page boundaries. */
		i = 0;
		m = m0;
		psize = 0;
		dsize = 0;
		fi->fi_mbuf = m0;
		while (m && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
			if (m->m_len == 0) {
				/* Advance past empty mbufs; a bare continue would spin forever. */
				m = m->m_next;
				continue;
			}

			/*
			 * First entry in scatter-gather table is used to keep
			 * pointer to frame info structure.
			 */
			DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i], (void *)fi);
			DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], 0);

			DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0);
			DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0);
			DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0);
			DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0);
			i++;

			dsize = m->m_len;
			vaddr = (vm_offset_t)m->m_data;
			while (dsize > 0 && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
				paddr = XX_VirtToPhys((void *)vaddr);
				ssize = PAGE_SIZE - (paddr & PAGE_MASK);
				if (m->m_len < ssize)
					ssize = m->m_len;

				DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i],
				    (void *)vaddr);
				DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], ssize);

				DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0);
				DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0);
				DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0);
				DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0);

				dsize -= ssize;
				vaddr += ssize;
				psize += ssize;
				i++;
			}

			if (dsize > 0)
				break;

			m = m->m_next;
		}

		/* Check if SG table was constructed properly */
		if (m != NULL || dsize != 0) {
			dtsec_rm_fi_free(sc, fi);
			m_freem(m0);
			continue;
		}

		DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i-1], 1);

		DPAA_FD_SET_ADDR(&fd, fi->fi_sgt);
		DPAA_FD_SET_LENGTH(&fd, psize);
		DPAA_FD_SET_FORMAT(&fd, e_DPAA_FD_FORMAT_TYPE_SHORT_MBSF);

		DPAA_FD_SET_DD(&fd, 0);
		DPAA_FD_SET_PID(&fd, 0);
		DPAA_FD_SET_BPID(&fd, 0);
		DPAA_FD_SET_OFFSET(&fd, 0);
		DPAA_FD_SET_STATUS(&fd, 0);

		/* Drop the softc lock across the QMan enqueue. */
		DTSEC_UNLOCK(sc);
		if (qman_fqr_enqueue(sc->sc_tx_fqr, 0, &fd) != E_OK) {
			dtsec_rm_fi_free(sc, fi);
			m_freem(m0);
		}
		DTSEC_LOCK(sc);
	}
}
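
When the TX frame queue reaches DTSEC_MAX_TX_QUEUE_LEN, Example #5 sets sc_tx_fqr_full and returns, so some confirmation-side path must clear the flag and restart transmission. A hypothetical sketch of that restart step (the function name is invented; the flag, lock macros, and start routine come from the snippet above):

/* Hypothetical: clear the backpressure flag once the TX FQ drains. */
static void
dtsec_rm_tx_drained(struct dtsec_softc *sc)
{
	DTSEC_LOCK(sc);
	if (sc->sc_tx_fqr_full) {
		sc->sc_tx_fqr_full = 0;
		dtsec_rm_if_start_locked(sc);
	}
	DTSEC_UNLOCK(sc);
}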
Code Example #6
static int
hn_start_locked(struct ifnet *ifp)
{
	int ret = 0;
	hn_softc_t *sc = ifp->if_softc;
	NETVSC_DRIVER_OBJECT *net_drv_obj = &g_netvsc_drv.drv_obj;
	struct device_context *device_ctx = vmbus_get_devctx(sc->hn_dev);

	int i = 0;
	unsigned char *buf;

	NETVSC_PACKET* packet;
	int num_frags = 0;
	int retries = 0;
	struct mbuf *m_head, *m;
	int len = 0;
	int xlen = 0;

	DPRINT_ENTER(NETVSC_DRV);

	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
		if (m_head == NULL) {
			break;
		}

		len = 0;
		num_frags = 0;
		xlen = 0;

		// Walk the mbuf chain computing total length and fragment count
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				num_frags++;
				len += m->m_len;
			}
		}

		DPRINT_DBG(NETVSC_DRV, "xmit packet - len %d", len);

		// Add 1 for skb->data and any additional ones requested
		num_frags += net_drv_obj->AdditionalRequestPageBufferCount;

		// Allocate a netvsc packet based on # of frags.
		buf = malloc(16 + sizeof(NETVSC_PACKET) + 
		    (num_frags * sizeof(PAGE_BUFFER)) + 
		    net_drv_obj->RequestExtSize, 
		    M_DEVBUF, M_ZERO | M_WAITOK);

		if (buf == NULL) {
			DPRINT_ERR(NETVSC_DRV, "unable to allocate NETVSC_PACKET");
			return -1;
		}

		packet = (NETVSC_PACKET *)(buf + 16);
		*(vm_offset_t *)buf = 0;

		packet->Extension = (void *)((unsigned long)packet +
		    sizeof(NETVSC_PACKET) + (num_frags * sizeof(PAGE_BUFFER)));

		// Setup the rndis header
		packet->PageBufferCount = num_frags;

		// TODO: Flush all write buffers/ memory fence ???
		//wmb();
	
		// Initialize it from the mbuf
		packet->TotalDataBufferLength	= len;

		// Start filling in the page buffers starting at
		// AdditionalRequestPageBufferCount offset

		i = net_drv_obj->AdditionalRequestPageBufferCount;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len) {
				vm_offset_t paddr = vtophys(mtod(m, vm_offset_t));
				packet->PageBuffers[i].Pfn = paddr >> PAGE_SHIFT;
				packet->PageBuffers[i].Offset = paddr & (PAGE_SIZE - 1);
				packet->PageBuffers[i].Length = m->m_len;
				DPRINT_DBG(NETVSC_DRV, 
						"vaddr: %p, pfn: %llx, Off: %x, len: %x\n", 
						paddr, packet->PageBuffers[i].Pfn, 
						packet->PageBuffers[i].Offset, 
						packet->PageBuffers[i].Length);

				i++;
			}
		}


		// Set the completion routine
		/*
		 * Fixme:  Research the netvsc_xmit_completion() function
		 * and figure out what to do about it.  It is currently too
		 * messed up to port easily.
		 */
		packet->Completion.Send.OnSendCompletion = netvsc_xmit_completion;
		packet->Completion.Send.SendCompletionContext = packet;
		packet->Completion.Send.SendCompletionTid = (ULONG_PTR)m_head;
retry_send:
		critical_enter();
		ret = net_drv_obj->OnSend(&device_ctx->device_obj, packet);
		critical_exit();

		if (ret == 0) {
			ifp->if_opackets++;
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m_head);
//			if (ifp->if_timer == 0)
//				ifp->if_timer = 5;
		} else {
			retries++;
			if (retries < 4) {
				DPRINT_ERR(NETVSC_DRV,
				    "unable to send...retrying %d...", retries);
				goto retry_send;
			}

			DPRINT_INFO(NETVSC_DRV, "net device (%p) stopping", sc);
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

			ret = -1;
//			net_device_ctx->stats.tx_dropped++;

			// Null it since the caller will free it instead of
			// the completion routine
			packet->Completion.Send.SendCompletionTid = 0;

			// Release the resources since we will not get any
			// send completion
			netvsc_xmit_completion((void*)packet);
		}
	}
Code Example #7
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct ifnet *ifp)
{
	hn_softc_t *sc = ifp->if_softc;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
	uint8_t *buf;
	netvsc_packet *packet;
	struct mbuf *m_head, *m;
	struct mbuf *mc_head = NULL;
	int i;
	int num_frags;
	int len;
	int xlen;
	int rppi_size;
	int retries = 0;
	int ret = 0;

	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
		if (m_head == NULL) {
			break;
		}

		len = 0;
		num_frags = 0;
		xlen = 0;

		/* Walk the mbuf list computing total length and num frags */
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				num_frags++;
				len += m->m_len;
			}
		}

		/*
		 * Reserve the number of pages requested.  Currently,
		 * one page is reserved for the message in the RNDIS
		 * filter packet
		 */
		num_frags += HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

		/* If exceeds # page_buffers in netvsc_packet */
		if (num_frags > NETVSC_PACKET_MAXPAGE) {
			/* m is NULL after the walk above; free the dequeued chain. */
			m_freem(m_head);

			return (EINVAL);
		}

		rppi_size = 0;
		if (m_head->m_flags & M_VLANTAG) {
			rppi_size = sizeof(rndis_per_packet_info) + 
			    sizeof(ndis_8021q_info);
		}

		/*
		 * Allocate a buffer with space for a netvsc packet plus a
		 * number of reserved areas.  First comes a (currently 16
		 * bytes, currently unused) reserved data area.  Second is
		 * the netvsc_packet, which includes (currently 4) page
		 * buffers.  Third (optional) is a rndis_per_packet_info
		 * struct, but only if a VLAN tag should be inserted into the
		 * Ethernet frame by the Hyper-V infrastructure.  Fourth is
		 * an area reserved for an rndis_filter_packet struct.
		 * Changed malloc to M_NOWAIT to avoid sleep under spin lock.
		 * No longer reserving extra space for page buffers, as they
		 * are already part of the netvsc_packet.
		 */
		buf = malloc(HV_NV_PACKET_OFFSET_IN_BUF +
		    sizeof(netvsc_packet) + rppi_size +
		    sizeof(rndis_filter_packet),
		    M_DEVBUF, M_ZERO | M_NOWAIT);
		if (buf == NULL) {
			/* Free the dequeued chain, not the exhausted iterator. */
			m_freem(m_head);

			return (ENOMEM);
		}

		packet = (netvsc_packet *)(buf + HV_NV_PACKET_OFFSET_IN_BUF);
		*(vm_offset_t *)buf = HV_NV_SC_PTR_OFFSET_IN_BUF;

		/*
		 * extension points to the area reserved for the
		 * rndis_filter_packet, which is placed just after
		 * the netvsc_packet (and rppi struct, if present;
		 * length is updated later).
		 */
		packet->extension = packet + 1;

		/* Set up the rndis header */
		packet->page_buf_count = num_frags;

		/* Initialize it from the mbuf */
		packet->tot_data_buf_len = len;

		/*
		 * If the Hyper-V infrastructure needs to embed a VLAN tag,
		 * initialize netvsc_packet and rppi struct values as needed.
		 */
		if (rppi_size) {
			/* Lower layers need the VLAN TCI */
			packet->vlan_tci = m_head->m_pkthdr.ether_vtag;
		}

		/*
		 * Fill the page buffers with mbuf info starting at index
		 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
		 */
		i = HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len) {
				vm_offset_t paddr =
				    vtophys(mtod(m, vm_offset_t));
				packet->page_buffers[i].pfn =
				    paddr >> PAGE_SHIFT;
				packet->page_buffers[i].offset =
				    paddr & (PAGE_SIZE - 1);
				packet->page_buffers[i].length = m->m_len;
				i++;
			}
		}

		/*
		 * If bpf, copy the mbuf chain.  This is less expensive than
		 * it appears; the mbuf clusters are not copied, only their
		 * reference counts are incremented.
		 * Needed to avoid a race condition where the completion
		 * callback is invoked, freeing the mbuf chain, before the
		 * bpf_mtap code has a chance to run.
		 */
		if (ifp->if_bpf) {
			mc_head = m_copypacket(m_head, M_DONTWAIT);
		}
retry_send:
		/* Set the completion routine */
		packet->compl.send.on_send_completion = netvsc_xmit_completion;
		packet->compl.send.send_completion_context = packet;
		packet->compl.send.send_completion_tid = (uint64_t)m_head;

		/* Removed critical_enter(), does not appear necessary */
		ret = hv_rf_on_send(device_ctx, packet);

		if (ret == 0) {
			ifp->if_opackets++;
			/* if bpf && mc_head, call bpf_mtap code */
			if (mc_head) {
				ETHER_BPF_MTAP(ifp, mc_head);
			}
		} else {
			retries++;
			if (retries < 4) {
				goto retry_send;
			}

			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

			/*
			 * Null the mbuf pointer so the completion function
			 * does not free the mbuf chain.  We just pushed the
			 * mbuf chain back on the if_snd queue.
			 */
			packet->compl.send.send_completion_tid = 0;

			/*
			 * Release the resources since we will not get any
			 * send completion
			 */
			netvsc_xmit_completion(packet);
		}

		/* if bpf && mc_head, free the mbuf chain copy */
		if (mc_head) {
			m_freem(mc_head);
		}
	}
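
Both Hyper-V examples register netvsc_xmit_completion() as the send-completion routine, stash the mbuf chain in the completion tid, and zero the tid when they keep ownership of the chain. A minimal sketch of what that routine has to do, using Example #7's field names and its HV_NV_PACKET_OFFSET_IN_BUF buffer layout; illustrative, not the project's actual implementation:

static void
netvsc_xmit_completion(void *context)
{
	netvsc_packet *packet = (netvsc_packet *)context;
	struct mbuf *mb;
	uint8_t *buf;

	/* A zero tid means the caller kept ownership of the mbuf chain. */
	mb = (struct mbuf *)(uintptr_t)packet->compl.send.send_completion_tid;
	if (mb != NULL)
		m_freem(mb);

	/* The packet was carved out of one malloc'ed buffer; free it whole. */
	buf = ((uint8_t *)packet) - HV_NV_PACKET_OFFSET_IN_BUF;
	free(buf, M_DEVBUF);
}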