Example #1
/*---------------------------------------------------------------------------*
 *	i4bputqueue_hipri - put message into front of queue to userland
 *---------------------------------------------------------------------------*/
void
i4bputqueue_hipri(struct mbuf *m)
{
    if(!openflag)
    {
        i4b_Dfreembuf(m);
        return;
    }

    crit_enter();

    if(IF_QFULL(&i4b_rdqueue))
    {
        struct mbuf *m1;
        IF_DEQUEUE(&i4b_rdqueue, m1);
        i4b_Dfreembuf(m1);
        NDBGL4(L4_ERR, "ERROR, queue full, removing entry!");
    }

    IF_PREPEND(&i4b_rdqueue, m);

    crit_exit();

    if(readflag)
    {
        readflag = 0;
        wakeup((caddr_t) &i4b_rdqueue);
    }

    KNOTE(&kq_rd_info.ki_note, 0);
}
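This same function reappears as Examples #6 and #7 below; the three variants differ only in their locking primitive (crit_enter()/splimp()/splnet()) and wakeup mechanism. The shared shape is: if the read queue is full, drop the oldest entry, then IF_PREPEND the new message so the userland reader sees it next; prepending rather than appending is what makes it "high priority". Below is a minimal user-space sketch of that shape. The queue type and helpers are hypothetical stand-ins for the kernel's ifqueue macros (IF_QFULL, IF_DEQUEUE, IF_PREPEND), not the real implementation.

#include <stdlib.h>

struct msg {
	struct msg *next;
	/* payload omitted */
};

struct queue {
	struct msg *head;
	struct msg *tail;
	int len;
	int maxlen;
};

static struct msg *
q_dequeue(struct queue *q)
{
	struct msg *m = q->head;

	if (m != NULL) {
		q->head = m->next;
		if (q->head == NULL)
			q->tail = NULL;
		q->len--;
	}
	return (m);
}

static void
q_prepend(struct queue *q, struct msg *m)
{
	m->next = q->head;
	q->head = m;
	if (q->tail == NULL)
		q->tail = m;
	q->len++;
}

static void
put_hipri(struct queue *q, struct msg *m)
{
	if (q->len >= q->maxlen)	/* IF_QFULL */
		free(q_dequeue(q));	/* queue full: drop the oldest entry */
	q_prepend(q, m);		/* jump to the front of the queue */
}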
Example #2
static void
bt3c_send(node_p node, hook_p hook, void *arg, int completed)
{
	bt3c_softc_p	 sc = (bt3c_softc_p) NG_NODE_PRIVATE(node);
	struct mbuf	*m = NULL;
	int		 i, wrote, len;

	if (sc == NULL)
		return;

	if (completed)
		sc->flags &= ~BT3C_XMIT;

	if (sc->flags & BT3C_XMIT)
		return;

	bt3c_set_address(sc, 0x7080);

	for (wrote = 0; wrote < BT3C_FIFO_SIZE; ) {
		IF_DEQUEUE(&sc->outq, m);
		if (m == NULL)
			break;

		while (m != NULL) {
			len = min((BT3C_FIFO_SIZE - wrote), m->m_len);

			for (i = 0; i < len; i++)
				bt3c_write_data(sc, m->m_data[i]);

			wrote += len;
			m->m_data += len;
			m->m_len -= len;

			if (m->m_len > 0)
				break;

			m = m_free(m);
		}

		if (m != NULL) {
			IF_PREPEND(&sc->outq, m);
			break;
		}

		NG_BT3C_STAT_PCKTS_SENT(sc->stat);
	}

	if (wrote > 0) {
		NG_BT3C_INFO(sc->dev, "Wrote %d bytes\n", wrote);
		NG_BT3C_STAT_BYTES_SENT(sc->stat, wrote);

		bt3c_write(sc, 0x7005, wrote);
		sc->flags |= BT3C_XMIT;
	}
} /* bt3c_send */
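bt3c_send() shows a different use of IF_PREPEND: the device FIFO can fill up mid-mbuf, so the partially consumed chain (with its data pointer already advanced past the bytes written) is pushed back onto the front of the queue, and the next call resumes exactly where this one stopped. A minimal sketch of that idiom follows; the FIFO and queue primitives are hypothetical stand-ins for bt3c_write_data() and the ifqueue macros.

struct buf {
	struct buf *next;
	unsigned char *data;	/* current read position */
	int len;		/* bytes remaining in this buffer */
};

/* Hypothetical device/queue primitives, declared but not defined here. */
int fifo_space(void);				/* bytes the FIFO accepts now */
void fifo_write(const unsigned char *p, int n);
void q_prepend_buf(struct buf **head, struct buf *b);
struct buf *buf_free(struct buf *b);		/* frees b, returns b->next */

static void
drain(struct buf **head, struct buf *b)
{
	int n;

	while (b != NULL) {
		n = fifo_space();
		if (n > b->len)
			n = b->len;
		fifo_write(b->data, n);
		b->data += n;		/* consume what was written */
		b->len -= n;
		if (b->len > 0) {
			/*
			 * FIFO filled mid-buffer: park the remainder at
			 * the head of the queue and resume next time.
			 */
			q_prepend_buf(head, b);
			return;
		}
		b = buf_free(b);	/* buffer fully sent; next in chain */
	}
}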
Example #3
/*
 * Encapsulate a packet of type family for the local net.
 */
static void
snstart(struct ifnet *ifp)
{
	struct sn_softc	*sc = ifp->if_softc;
	struct mbuf	*m;
	int		mtd_next;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

outloop:
	/* Check for room in the xmit buffer. */
	if ((mtd_next = (sc->mtd_free + 1)) == NTDA)
		mtd_next = 0;

	if (mtd_next == sc->mtd_hw) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == 0)
		return;

	/* We need the header for m_pkthdr.len. */
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("%s: snstart: no header mbuf", device_xname(sc->sc_dev));

	/*
	 * If bpf is listening on this interface, let it
	 * see the packet before we commit it to the wire.
	 */
	bpf_mtap(ifp, m);

	/*
	 * If there is nothing in the o/p queue, and there is room in
	 * the Tx ring, then send the packet directly.  Otherwise append
	 * it to the o/p queue.
	 */
	if ((sonicput(sc, m, mtd_next)) == 0) {
		IF_PREPEND(&ifp->if_snd, m);
		return;
	}

	sc->mtd_prev = sc->mtd_free;
	sc->mtd_free = mtd_next;

	ifp->if_opackets++;		/* # of pkts */

	/* Jump back for possibly more punishment. */
	goto outloop;
}
Example #4
File: if_cdce.c  Project: MarginC/kame
Static void
cdce_start(struct ifnet *ifp)
{
    struct cdce_softc	*sc;
    struct mbuf		*m_head = NULL;

    sc = ifp->if_softc;
    CDCE_LOCK(sc);

    if (sc->cdce_dying ||
            ifp->if_flags & IFF_OACTIVE ||
            !(ifp->if_flags & IFF_RUNNING)) {
        CDCE_UNLOCK(sc);
        return;
    }

    IF_DEQUEUE(&ifp->if_snd, m_head);
    if (m_head == NULL) {
        CDCE_UNLOCK(sc);
        return;
    }

    if (cdce_encap(sc, m_head, 0)) {
        IF_PREPEND(&ifp->if_snd, m_head);
        ifp->if_flags |= IFF_OACTIVE;
        CDCE_UNLOCK(sc);
        return;
    }

    BPF_MTAP(ifp, m_head);

    ifp->if_flags |= IFF_OACTIVE;

    CDCE_UNLOCK(sc);

    return;
}
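snstart() and cdce_start() are both instances of the classic if_start idiom: dequeue a packet, attempt to hand it to the hardware, and on failure IF_PREPEND it back so the same packet is retried first and ordering is preserved. Here is a condensed user-space sketch of that control flow, reusing struct queue, q_dequeue() and q_prepend() from the sketch after Example #1; try_transmit() is a hypothetical stand-in for sonicput()/cdce_encap(), returning 0 when the hardware has no room.

int try_transmit(struct msg *m);	/* hypothetical: 0 means "no room" */

static void
start(struct queue *sendq)
{
	struct msg *m;

	for (;;) {
		m = q_dequeue(sendq);
		if (m == NULL)
			return;			/* queue drained */
		if (try_transmit(m) == 0) {
			/*
			 * No room in the transmit ring: put the packet
			 * back at the front, not the tail, so it goes
			 * out first once space frees up.
			 */
			q_prepend(sendq, m);
			return;
		}
	}
}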
Example #5
static int iavc_receive_start(iavc_softc_t *sc)
{
    struct mbuf *m = i4b_Dgetmbuf(3);
    u_int8_t *p;

    if (sc->sc_blocked && sc->sc_state == IAVC_UP)
	printf("%s: receive_start\n", device_xname(&sc->sc_dev));

    if (!m) {
	aprint_error_dev(&sc->sc_dev, "can't get memory\n");
	return (ENOMEM);
    }

    /*
     * byte  0x73 = SEND_POLLACK
     */

    p = amcc_put_byte(mtod(m, u_int8_t*), 0);
    p = amcc_put_byte(p, 0);
    p = amcc_put_byte(p, SEND_POLLACK);

    IF_PREPEND(&sc->sc_txq, m);

    NDBGL4(L4_IAVCDBG, "%s: blocked = %d, state = %d",
      device_xname(&sc->sc_dev), sc->sc_blocked, sc->sc_state);

    sc->sc_blocked = 0;
    iavc_start_tx(sc);

    /* If this was our first START, register our readiness */
    if (sc->sc_state != IAVC_UP) {
	sc->sc_state = IAVC_UP;
	capi_ll_control(&sc->sc_capi, CAPI_CTRL_READY, 1);
    }

    return 0;
}
Example #6
/*---------------------------------------------------------------------------*
 *	i4bputqueue_hipri - put message into front of queue to userland
 *---------------------------------------------------------------------------*/
void
i4bputqueue_hipri(struct mbuf *m)
{
	int x;
	
	if(!openflag)
	{
		i4b_Dfreembuf(m);
		return;
	}

	x = splimp();
	
	if(IF_QFULL(&i4b_rdqueue))
	{
		struct mbuf *m1;
		IF_DEQUEUE(&i4b_rdqueue, m1);
		i4b_Dfreembuf(m1);
		DBGL4(L4_ERR, "i4bputqueue", ("ERROR, queue full, removing entry!\n"));
	}

	IF_PREPEND(&i4b_rdqueue, m);

	splx(x);	

	if(readflag)
	{
		readflag = 0;
		wakeup((caddr_t) &i4b_rdqueue);
	}

	if(selflag)
	{
		selflag = 0;
		selwakeup(&select_rd_info);
	}
}
Example #7
/*---------------------------------------------------------------------------*
 *	i4bputqueue_hipri - put message into front of queue to userland
 *---------------------------------------------------------------------------*/
void
i4bputqueue_hipri(struct mbuf *m)
{
	int x;

	if(!openflag)
	{
		i4b_Dfreembuf(m);
		return;
	}

	x = splnet();

	if(IF_QFULL(&i4b_rdqueue))
	{
		struct mbuf *m1;
		IF_DEQUEUE(&i4b_rdqueue, m1);
		i4b_Dfreembuf(m1);
		NDBGL4(L4_ERR, "ERROR, queue full, removing entry!");
	}

	IF_PREPEND(&i4b_rdqueue, m);

	splx(x);

	if(readflag)
	{
		readflag = 0;
		wakeup((void *) &i4b_rdqueue);
	}

	if(selflag)
	{
		selflag = 0;
		selnotify(&select_rd_info, 0, 0);
	}
}
Example #8
static int hn_start_locked (struct ifnet *ifp)
{
	int ret = 0;
	hn_softc_t *sc = ifp->if_softc;
	NETVSC_DRIVER_OBJECT *net_drv_obj = &g_netvsc_drv.drv_obj;
	struct device_context *device_ctx = vmbus_get_devctx(sc->hn_dev);

	int i = 0;
	unsigned char *buf;

	NETVSC_PACKET* packet;
	int num_frags = 0;
	int retries = 0;
	struct mbuf *m_head, *m;
	int len = 0;
	int xlen = 0;

	DPRINT_ENTER(NETVSC_DRV);

	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
		if (m_head == NULL) {
			break;
		}

		len = 0;
		num_frags = 0;
		xlen = 0;

		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				num_frags++;
				len += m->m_len;
			}
		}

		DPRINT_DBG(NETVSC_DRV, "xmit packet - len %d", len);

		// Add 1 for skb->data and any additional ones requested
		num_frags += net_drv_obj->AdditionalRequestPageBufferCount;

		// Allocate a netvsc packet based on # of frags.
		buf = malloc(16 + sizeof(NETVSC_PACKET) + 
		    (num_frags * sizeof(PAGE_BUFFER)) + 
		    net_drv_obj->RequestExtSize, 
		    M_DEVBUF, M_ZERO | M_WAITOK);

		if (buf == NULL) {
			DPRINT_ERR(NETVSC_DRV, "unable to allocate NETVSC_PACKET");
			return -1;
		}

		packet = (NETVSC_PACKET *)(buf + 16);
		*(vm_offset_t *)buf = 0;

		packet->Extension = (void*)((unsigned long)packet + 
		    sizeof(NETVSC_PACKET) + (num_frags * sizeof(PAGE_BUFFER))) ;

		// Setup the rndis header
		packet->PageBufferCount = num_frags;

		// TODO: Flush all write buffers/ memory fence ???
		//wmb();
	
		// Initialize it from the mbuf
		packet->TotalDataBufferLength	= len;

		// Start filling in the page buffers starting at
		// AdditionalRequestPageBufferCount offset

		i = net_drv_obj->AdditionalRequestPageBufferCount;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len) {
				vm_offset_t paddr = vtophys(mtod(m, vm_offset_t));
				packet->PageBuffers[i].Pfn = paddr >> PAGE_SHIFT;
				packet->PageBuffers[i].Offset = paddr & (PAGE_SIZE - 1);
				packet->PageBuffers[i].Length = m->m_len;
				DPRINT_DBG(NETVSC_DRV, 
						"vaddr: %p, pfn: %llx, Off: %x, len: %x\n", 
						paddr, packet->PageBuffers[i].Pfn, 
						packet->PageBuffers[i].Offset, 
						packet->PageBuffers[i].Length);

				i++;
			}
		}

		// Set the completion routine
		/*
		 * Fixme:  Research the netvsc_xmit_completion() function
		 * and figure out what to do about it.  It is currently too
		 * messed up to port easily.
		 */
		packet->Completion.Send.OnSendCompletion = netvsc_xmit_completion;
		packet->Completion.Send.SendCompletionContext = packet;
		packet->Completion.Send.SendCompletionTid = (ULONG_PTR)m_head;
retry_send:
		critical_enter();
		ret = net_drv_obj->OnSend(&device_ctx->device_obj, packet);
		critical_exit();

		if (ret == 0) {
			ifp->if_opackets++;
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m_head);
//			if (ifp->if_timer == 0)
//				ifp->if_timer = 5;
		} else {
			retries++;
			if (retries < 4) {
				DPRINT_ERR(NETVSC_DRV,
				    "unable to send...retrying %d...", retries);
				goto retry_send;
			}

			DPRINT_INFO(NETVSC_DRV, "net device (%p) stopping", sc);
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

			ret = -1;
//			net_device_ctx->stats.tx_dropped++;

			// Null it since the caller will free it instead of
			// the completion routine
			packet->Completion.Send.SendCompletionTid = 0;

			// Release the resources since we will not get any
			// send completion
			netvsc_xmit_completion((void*)packet);
		}
	}

	DPRINT_EXIT(NETVSC_DRV);

	return (ret);
}
Example #9
static void
pdq_ifstart_locked(struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    struct mbuf *m;
    int tx = 0;

    PDQ_LOCK_ASSERT(sc);
    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
	return;

    if (sc->timer == 0)
	sc->timer = PDQ_OS_TX_TIMEOUT;

    if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
	PDQ_IFNET(sc)->if_drv_flags |= IFF_DRV_OACTIVE;
	return;
    }
    sc->sc_flags |= PDQIF_DOWNCALL;
    for (;; tx = 1) {
	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
	    break;
#if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX)
	if ((m->m_flags & M_HASTXDMAMAP) == 0) {
	    bus_dmamap_t map;
	    if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
		m->m_data[0] = PDQ_FDDI_PH0;
		m->m_data[1] = PDQ_FDDI_PH1;
		m->m_data[2] = PDQ_FDDI_PH2;
	    }
	    if (!bus_dmamap_create(sc->sc_dmatag, m->m_pkthdr.len, 255,
				   m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map)) {
		if (!bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
					  BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
		    bus_dmamap_sync(sc->sc_dmatag, map, 0, m->m_pkthdr.len,
				    BUS_DMASYNC_PREWRITE);
		    M_SETCTX(m, map);
		    m->m_flags |= M_HASTXDMAMAP;
		}
	    }
	    if ((m->m_flags & M_HASTXDMAMAP) == 0)
		break;
	}
#else
	if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
	    m->m_data[0] = PDQ_FDDI_PH0;
	    m->m_data[1] = PDQ_FDDI_PH1;
	    m->m_data[2] = PDQ_FDDI_PH2;
	}
#endif

	if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE)
	    break;
    }
    if (m != NULL) {
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	IF_PREPEND(&ifp->if_snd, m);
    }
    if (tx)
	PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
    sc->sc_flags &= ~PDQIF_DOWNCALL;
}
Example #10
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct ifnet *ifp)
{
	hn_softc_t *sc = ifp->if_softc;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
	uint8_t *buf;
	netvsc_packet *packet;
	struct mbuf *m_head, *m;
	struct mbuf *mc_head = NULL;
	int i;
	int num_frags;
	int len;
	int xlen;
	int rppi_size;
	int retries = 0;
	int ret = 0;

	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
		if (m_head == NULL) {
			break;
		}

		len = 0;
		num_frags = 0;
		xlen = 0;

		/* Walk the mbuf list computing total length and num frags */
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				num_frags++;
				len += m->m_len;
			}
		}

		/*
		 * Reserve the number of pages requested.  Currently,
		 * one page is reserved for the message in the RNDIS
		 * filter packet
		 */
		num_frags += HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

		/* If exceeds # page_buffers in netvsc_packet */
		if (num_frags > NETVSC_PACKET_MAXPAGE) {
			m_freem(m_head);	/* m is NULL after the loop above */

			return (EINVAL);
		}

		rppi_size = 0;
		if (m_head->m_flags & M_VLANTAG) {
			rppi_size = sizeof(rndis_per_packet_info) + 
			    sizeof(ndis_8021q_info);
		}

		/*
		 * Allocate a buffer with space for a netvsc packet plus a
		 * number of reserved areas.  First comes a (currently 16
		 * bytes, currently unused) reserved data area.  Second is
		 * the netvsc_packet, which includes (currently 4) page
		 * buffers.  Third (optional) is a rndis_per_packet_info
		 * struct, but only if a VLAN tag should be inserted into the
		 * Ethernet frame by the Hyper-V infrastructure.  Fourth is
		 * an area reserved for an rndis_filter_packet struct.
		 * Changed malloc to M_NOWAIT to avoid sleep under spin lock.
		 * No longer reserving extra space for page buffers, as they
		 * are already part of the netvsc_packet.
		 */
		buf = malloc(HV_NV_PACKET_OFFSET_IN_BUF +
		    sizeof(netvsc_packet) + rppi_size +
		    sizeof(rndis_filter_packet),
		    M_DEVBUF, M_ZERO | M_NOWAIT);
		if (buf == NULL) {
			m_freem(m_head);	/* free the dequeued chain; m is NULL here */

			return (ENOMEM);
		}

		packet = (netvsc_packet *)(buf + HV_NV_PACKET_OFFSET_IN_BUF);
		*(vm_offset_t *)buf = HV_NV_SC_PTR_OFFSET_IN_BUF;

		/*
		 * extension points to the area reserved for the
		 * rndis_filter_packet, which is placed just after
		 * the netvsc_packet (and rppi struct, if present;
		 * length is updated later).
		 */
		packet->extension = packet + 1;

		/* Set up the rndis header */
		packet->page_buf_count = num_frags;

		/* Initialize it from the mbuf */
		packet->tot_data_buf_len = len;

		/*
		 * If the Hyper-V infrastructure needs to embed a VLAN tag,
		 * initialize netvsc_packet and rppi struct values as needed.
		 */
		if (rppi_size) {
			/* Lower layers need the VLAN TCI */
			packet->vlan_tci = m_head->m_pkthdr.ether_vtag;
		}

		/*
		 * Fill the page buffers with mbuf info starting at index
		 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
		 */
		i = HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len) {
				vm_offset_t paddr =
				    vtophys(mtod(m, vm_offset_t));
				packet->page_buffers[i].pfn =
				    paddr >> PAGE_SHIFT;
				packet->page_buffers[i].offset =
				    paddr & (PAGE_SIZE - 1);
				packet->page_buffers[i].length = m->m_len;
				i++;
			}
		}

		/*
		 * If bpf, copy the mbuf chain.  This is less expensive than
		 * it appears; the mbuf clusters are not copied, only their
		 * reference counts are incremented.
		 * Needed to avoid a race condition where the completion
		 * callback is invoked, freeing the mbuf chain, before the
		 * bpf_mtap code has a chance to run.
		 */
		if (ifp->if_bpf) {
			mc_head = m_copypacket(m_head, M_DONTWAIT);
		}
retry_send:
		/* Set the completion routine */
		packet->compl.send.on_send_completion = netvsc_xmit_completion;
		packet->compl.send.send_completion_context = packet;
		packet->compl.send.send_completion_tid = (uint64_t)m_head;

		/* Removed critical_enter(), does not appear necessary */
		ret = hv_rf_on_send(device_ctx, packet);

		if (ret == 0) {
			ifp->if_opackets++;
			/* if bpf && mc_head, call bpf_mtap code */
			if (mc_head) {
				ETHER_BPF_MTAP(ifp, mc_head);
			}
		} else {
			retries++;
			if (retries < 4) {
				goto retry_send;
			}

			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

			/*
			 * Null the mbuf pointer so the completion function
			 * does not free the mbuf chain.  We just pushed the
			 * mbuf chain back on the if_snd queue.
			 */
			packet->compl.send.send_completion_tid = 0;

			/*
			 * Release the resources since we will not get any
			 * send completion
			 */
			netvsc_xmit_completion(packet);
		}

		/* if bpf && mc_head, free the mbuf chain copy */
		if (mc_head) {
			m_freem(mc_head);
		}
	}

	return (ret);
}
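Both hn_start_locked() versions bound their retries: the send is attempted a few times, and only on persistent failure is the packet prepended back, the interface marked OACTIVE, and the completion tid nulled so the caller (not the completion routine) owns the mbuf chain. A condensed sketch of that retry idiom, again reusing struct queue and q_prepend() from the first sketch; device_send() is a hypothetical stand-in for hv_rf_on_send(), returning 0 on success.

int device_send(struct msg *m);	/* hypothetical: 0 on success */

static int
send_with_retry(struct queue *sendq, struct msg *m)
{
	int tries;

	for (tries = 0; tries < 4; tries++) {
		if (device_send(m) == 0)
			return (0);
	}
	/*
	 * Persistent failure: requeue at the head so ordering is kept,
	 * and let the completion path restart transmission later.
	 */
	q_prepend(sendq, m);
	return (-1);
}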