Example #1
0
/*
 * Deallocate a ppp unit.  Must be called at splsoftnet or higher.
 */
void
pppdealloc(struct ppp_softc *sc)
{
    struct ppp_pkt *pkt;
    struct mbuf *m;

    splsoftassert(IPL_SOFTNET);

    if_down(&sc->sc_if);
    sc->sc_if.if_flags &= ~(IFF_UP|IFF_RUNNING);
    sc->sc_devp = NULL;
    sc->sc_xfer = 0;
    while ((pkt = ppp_pkt_dequeue(&sc->sc_rawq)) != NULL)
	ppp_pkt_free(pkt);
    while ((m = mq_dequeue(&sc->sc_inq)) != NULL)
	m_freem(m);
    for (;;) {
	IF_DEQUEUE(&sc->sc_fastq, m);
	if (m == NULL)
	    break;
	m_freem(m);
    }
    while ((m = sc->sc_npqueue) != NULL) {
	sc->sc_npqueue = m->m_nextpkt;
	m_freem(m);
    }
    m_freem(sc->sc_togo);
    sc->sc_togo = NULL;

#ifdef PPP_COMPRESS
    ppp_ccp_closed(sc);
    sc->sc_xc_state = NULL;
    sc->sc_rc_state = NULL;
#endif /* PPP_COMPRESS */
#if NBPFILTER > 0
    if (sc->sc_pass_filt.bf_insns != 0) {
	free(sc->sc_pass_filt.bf_insns, M_DEVBUF, 0);
	sc->sc_pass_filt.bf_insns = 0;
	sc->sc_pass_filt.bf_len = 0;
    }
    if (sc->sc_active_filt.bf_insns != 0) {
	free(sc->sc_active_filt.bf_insns, M_DEVBUF, 0);
	sc->sc_active_filt.bf_insns = 0;
	sc->sc_active_filt.bf_len = 0;
    }
#endif
#ifdef VJC
    if (sc->sc_comp != 0) {
	free(sc->sc_comp, M_DEVBUF, 0);
	sc->sc_comp = 0;
    }
#endif
}
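The drain loops above all reduce to the same ifqueue idiom. A minimal hedged sketch of that idiom follows, using a hypothetical example_purge() helper that is not part of the ppp driver, and assuming the usual <sys/mbuf.h>/<net/if.h> declarations:

/*
 * Minimal sketch of the dequeue-and-free idiom used above.
 * "example_purge" is a hypothetical helper, not from the ppp driver.
 */
static void
example_purge(struct ifqueue *q)
{
	struct mbuf *m;

	for (;;) {
		IF_DEQUEUE(q, m);	/* m is set to NULL once the queue is empty */
		if (m == NULL)
			break;
		m_freem(m);		/* frees the whole mbuf chain */
	}
}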
Example #2
0
static void iavc_start_tx(iavc_softc_t *sc)
{
    struct mbuf *m;
    u_int32_t txlen;

    /* If device has put us on hold, punt. */

    if (sc->sc_blocked) {
	return;
    }

    /* If using DMA and transmitter busy, punt. */
    if (sc->sc_dma && (sc->sc_csr & EN_TX_TC_INT)) {
	return;
    }

    /* Else, see if we have messages to send. */
    IF_DEQUEUE(&sc->sc_txq, m);
    if (!m) {
	return;
    }

    /* Have message, will send. */
    if (CAPIMSG_LEN(m->m_data)) {
	/* A proper CAPI message, possibly with B3 data */
	txlen = iavc_tx_capimsg(sc, m);
    } else {
	/* A board control message to be sent as is */
	txlen = iavc_tx_ctrlmsg(sc, m);
    }

    if (m->m_next) {
	i4b_Bfreembuf(m->m_next);
	m->m_next = NULL;
    }
    i4b_Dfreembuf(m);

    /* Kick DMA into motion if applicable */
    if (sc->sc_dma) {
	txlen = (txlen + 3) & ~3;

	bus_dmamap_sync(sc->dmat, sc->tx_map, 0, txlen,
	  BUS_DMASYNC_PREWRITE);

	AMCC_WRITE(sc, AMCC_TXPTR, sc->tx_map->dm_segs[0].ds_addr);
	AMCC_WRITE(sc, AMCC_TXLEN, txlen);
	sc->sc_csr |= EN_TX_TC_INT;

	if (!sc->sc_intr)
	    AMCC_WRITE(sc, AMCC_INTCSR, sc->sc_csr);
    }
}
Example #3
0
/*
 * Encapsulate a packet of type family for the local net.
 */
static void
snstart(struct ifnet *ifp)
{
	struct sn_softc	*sc = ifp->if_softc;
	struct mbuf	*m;
	int		mtd_next;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

outloop:
	/* Check for room in the xmit buffer. */
	if ((mtd_next = (sc->mtd_free + 1)) == NTDA)
		mtd_next = 0;

	if (mtd_next == sc->mtd_hw) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == 0)
		return;

	/* We need the header for m_pkthdr.len. */
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("%s: snstart: no header mbuf", device_xname(sc->sc_dev));

	/*
	 * If bpf is listening on this interface, let it
	 * see the packet before we commit it to the wire.
	 */
	bpf_mtap(ifp, m);

	/*
	 * If there is nothing in the o/p queue, and there is room in
	 * the Tx ring, then send the packet directly.  Otherwise append
	 * it to the o/p queue.
	 */
	if ((sonicput(sc, m, mtd_next)) == 0) {
		IF_PREPEND(&ifp->if_snd, m);
		return;
	}

	sc->mtd_prev = sc->mtd_free;
	sc->mtd_free = mtd_next;

	ifp->if_opackets++;		/* # of pkts */

	/* Jump back for possibly more punishment. */
	goto outloop;
}
Example #4
0
void
scestart(int unit)
{
	sce_softc_t ssc = scegetssc(unit);
	scsit_return_t sr;
	struct ifnet *ifp;
	io_req_t ior;
	TR_DECL("scestart");

	tr2("enter: unit = 0x%x", unit);

	assert(ssc != NULL_SSC);

	ifp = &ssc->sce_if;
	IF_DEQUEUE(&ifp->if_snd, ior);
      dequeued:
	while(ior) {
		struct ether_header *ehp = (struct ether_header *)ior->io_data;
		scsit_handle_t handle;

		bcopy((const char *)sce_fake_addr,(char *)&ehp->ether_shost,6);

		sr = scsit_handle_alloc(&handle);
		assert(sr == SCSIT_SUCCESS);

		tr1("sending");
		sr = scsit_send(handle,
				ssc->node,
				sce_lun,
				(void *)ior,
				(char *)ehp,
				ior->io_count,
				FALSE);
		assert(sr == SCSIT_SUCCESS);
		IF_DEQUEUE(&ifp->if_snd, ior);
	}
	tr1("exit");
}
Example #5
0
hdintr ()
{
	register struct mbuf *m;
	register struct hdcb *hdp;
	register struct ifnet *ifp;
	register int s;
	static struct ifnet *lastifp;
	static struct hdcb *lasthdp;

	for (;;) {
		s = splimp ();
		IF_DEQUEUE (&hdintrq, m);
		splx (s);
		if (m == 0)
			break;
		if (m->m_len < HDHEADERLN) {
			printf ("hdintr: packet too short (len=%d)\n",
				m->m_len);
			m_freem (m);
			continue;
		}
		if ((m->m_flags & M_PKTHDR) == 0)
			panic("hdintr");
		ifp = m->m_pkthdr.rcvif;

		/*
		 * look up the appropriate hdlc control block
		 */

		if (ifp == lastifp)
			hdp = lasthdp;
		else {
			for (hdp = hdcbhead; hdp; hdp = hdp->hd_next)
				if (hdp->hd_ifp == ifp)
					break;
			if (hdp == 0) {
				printf ("hdintr: unknown interface %x\n", ifp);
				m_freem (m);
				continue;
			}
			lastifp = ifp;
			lasthdp = hdp;
		}

		/* Process_rxframe returns FALSE if the frame was NOT queued
		   for the next higher layers. */
		if (process_rxframe (hdp, m) == FALSE)
			m_freem (m);
	}
}
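hdintr has the classic protocol software-interrupt shape: the spl is raised only around the dequeue, and the per-packet work runs afterwards at the lower priority level. A minimal hedged sketch of just that shape, with a hypothetical example_input() handler standing in for the protocol-specific work:

/*
 * Sketch of the softint drain shape used above: raise the spl only
 * around IF_DEQUEUE, then process at the caller's level.
 * "example_input" is a hypothetical handler, not a real routine.
 */
static void example_input(struct mbuf *);

static void
example_intr(struct ifqueue *inq)
{
	struct mbuf *m;
	int s;

	for (;;) {
		s = splimp();
		IF_DEQUEUE(inq, m);
		splx(s);
		if (m == NULL)
			break;
		example_input(m);
	}
}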
Example #6
0
void xilTemacTxThreadSingle(struct ifnet* ifp)
{
  struct XilTemac* xilTemac = ifp->if_softc;
  struct mbuf*     m;
  uint32_t base = xilTemac->iAddr;

#ifdef DEBUG
  printk("%s: tx send packet, interface '%s'\n", DRIVER_PREFIX, xilTemac->iUnitName );
#endif

  /* Send packets till mbuf queue empty or tx fifo full */
  for(;;) {
    uint32_t i = 0;

    /* 1) clear out any statuses from previously sent tx frames */
    while( IN32(base + XTE_IPISR_OFFSET) & XTE_IPXR_XMIT_DONE_MASK ) {
      IN32(base + XTE_TSR_OFFSET);
      OUT32(base + XTE_IPISR_OFFSET, XTE_IPXR_XMIT_DONE_MASK);
      i++;
    }
    if( i > xilTemac->iStats.iTxMaxDrained ) {
      xilTemac->iStats.iTxMaxDrained = i;
    }

    /* 2) Check if enough space in tx data fifo _and_ tx tplr for an entire
       ethernet frame */
    if( xilTemacTxFifoVacancyBytes( xilTemac->iAddr ) <= ifp->if_mtu ) {
      /* 2a) If not, enable transmit done interrupt and break out of loop to
         wait for space */
      uint32_t ipier = IN32(base + XTE_IPIER_OFFSET);
      ipier |= (XTE_IPXR_XMIT_DONE_MASK);
      OUT32(base + XTE_IPIER_OFFSET, ipier);
      break;
    }

    /* 3) Continue to dequeue mbuf chains till none left */
    IF_DEQUEUE( &(ifp->if_snd), m);
    if( !m ) {
      break;
    }

    /* 4) Send dequeued mbuf chain */
    xilTemacSendPacket( ifp, m );

    /* 5) Free mbuf chain */
    m_freem( m );
  }
  ifp->if_flags &= ~IFF_OACTIVE;
}
Example #7
0
static status_t
compat_read(void *cookie, off_t position, void *buffer, size_t *numBytes)
{
	struct ifnet *ifp = cookie;
	uint32 semFlags = B_CAN_INTERRUPT;
	status_t status;
	struct mbuf *mb;
	size_t length;

	//if_printf(ifp, "compat_read(%lld, %p, [%lu])\n", position,
	//	buffer, *numBytes);

	if (ifp->flags & DEVICE_CLOSED)
		return B_INTERRUPTED;

	if (ifp->flags & DEVICE_NON_BLOCK)
		semFlags |= B_RELATIVE_TIMEOUT;

	do {
		status = acquire_sem_etc(ifp->receive_sem, 1, semFlags, 0);
		if (ifp->flags & DEVICE_CLOSED)
			return B_INTERRUPTED;

		if (status == B_WOULD_BLOCK) {
			*numBytes = 0;
			return B_OK;
		} else if (status < B_OK)
			return status;

		IF_DEQUEUE(&ifp->receive_queue, mb);
	} while (mb == NULL);

	length = min_c(max_c((size_t)mb->m_pkthdr.len, 0), *numBytes);

#if 0
	mb = m_defrag(mb, 0);
	if (mb == NULL) {
		*numBytes = 0;
		return B_NO_MEMORY;
	}
#endif

	m_copydata(mb, 0, length, buffer);
	*numBytes = length;

	m_freem(mb);
	return B_OK;
}
Example #8
0
/*
 * Start output on the pflog interface.
 */
void
pflogstart(struct ifnet *ifp)
{
	struct mbuf *m;
	int s;

	for (;;) {
		s = splnet();
		IF_DROP(&ifp->if_snd);
		IF_DEQUEUE(&ifp->if_snd, m);
		splx(s);

		if (m == NULL)
			return;
		m_freem(m);
	}
}
Example #9
0
void
btintr(void)
{
	struct mbuf *m;
	int s;

	for (;;) {
		s = splnet();
		IF_DEQUEUE(&btintrq, m);
		splx(s);

		if (m == NULL)
			break;

		ng_btsocket_hci_raw_data_input(m);
	}
}
Example #10
0
/*---------------------------------------------------------------------------*
 *	clear an hdlc rx queue for a rbch unit
 *---------------------------------------------------------------------------*/
static void
rbch_clrq(void *softc)
{
	struct rbch_softc *sc = softc;
	struct mbuf *m;
	int s;

	for(;;)
	{
		s = splnet();
		IF_DEQUEUE(&sc->sc_hdlcq, m);
		splx(s);

		if(m)
			m_freem(m);
		else
			break;
	}
}
Example #11
0
void
ieee80211_drain_ifq(struct ifqueue* ifq)
{
	struct ieee80211_node* ni;
	struct mbuf* m;

	for (;;) {
		IF_DEQUEUE(ifq, m);
		if (m == NULL)
			break;

		ni = (struct ieee80211_node*)m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("frame w/o node"));
		ieee80211_free_node(ni);
		m->m_pkthdr.rcvif = NULL;

		m_freem(m);
	}
}
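The node pointer released here was stashed in m_pkthdr.rcvif when the frame was queued. A hedged sketch of the producer side this drain routine assumes (illustration only, not the actual net80211 enqueue path):

/*
 * Sketch of the producer side assumed by the drain loop above: take a
 * node reference and stash it in m_pkthdr.rcvif before queueing.
 * Illustration only; the real net80211 enqueue paths differ in detail.
 */
static void
example_queue_frame(struct ifqueue *ifq, struct mbuf *m,
    struct ieee80211_node *ni)
{
	m->m_pkthdr.rcvif = (void *)ieee80211_ref_node(ni);
	IF_ENQUEUE(ifq, m);
}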
Example #12
0
static void
fwe_start(struct ifnet *ifp)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe;
	int s;

	FWEDEBUG(ifp, "starting\n");

	if (fwe->dma_ch < 0) {
		struct mbuf	*m = NULL;

		FWEDEBUG(ifp, "not ready\n");

		s = splimp();
		do {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL)
				m_freem(m);
			ifp->if_oerrors ++;
		} while (m != NULL);
		splx(s);

		return;
	}

	s = splimp();
#if defined(__FreeBSD__)
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#else
	ifp->if_flags |= IFF_OACTIVE;
#endif

	if (ifp->if_snd.ifq_len != 0)
		fwe_as_output(fwe, ifp);

#if defined(__FreeBSD__)
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#else
	ifp->if_flags &= ~IFF_OACTIVE;
#endif
	splx(s);
}
Example #13
0
void
mplsintr(void)
{
	struct mbuf *m;
	int s;

	for (;;) {
		/* Get next datagram of input queue */
		s = splnet();
		IF_DEQUEUE(&mplsintrq, m);
		splx(s);
		if (m == NULL)
			return;
#ifdef DIAGNOSTIC
		if ((m->m_flags & M_PKTHDR) == 0)
			panic("mplsintr no HDR");
#endif
		mpls_input(m);
	}
}
Example #14
0
/*---------------------------------------------------------------------------*
 *	read from trace device
 *---------------------------------------------------------------------------*/
PDEVSTATIC int
isdntrcread(dev_t dev, struct uio * uio, int ioflag)
{
	struct mbuf *m;
	int x;
	int error = 0;
	int unit = minor(dev);
	
	if(!(device_state[unit] & ST_ISOPEN))
		return(EIO);

	x = splnet();
	
	while(IF_QEMPTY(&trace_queue[unit]) && (device_state[unit] & ST_ISOPEN))
	{
		device_state[unit] |= ST_WAITDATA;
		
		if((error = tsleep((caddr_t) &trace_queue[unit],
					TTIPRI | PCATCH,
					"bitrc", 0 )) != 0)
		{
			device_state[unit] &= ~ST_WAITDATA;
			splx(x);
			return(error);
		}
	}

	IF_DEQUEUE(&trace_queue[unit], m);

	if(m && m->m_len)
		error = uiomove(m->m_data, m->m_len, uio);
	else
		error = EIO;
		
	if(m)
		i4b_Bfreembuf(m);

	splx(x);
	
	return(error);
}
Example #15
0
static void tx_daemon(void *arg)
{
  struct ifnet *ifp = &arpcom.ac_if;
  rtems_event_set events;
  struct mbuf *m;
  
  while(1) {
    rtems_bsdnet_event_receive(START_TRANSMIT_EVENT,
      RTEMS_EVENT_ANY | RTEMS_WAIT, RTEMS_NO_TIMEOUT, &events);
    while(1) {
      IF_DEQUEUE(&ifp->if_snd, m);
      if(m == NULL)
        break;
      rtems_bsdnet_event_receive(CTS_EVENT, RTEMS_EVENT_ANY | RTEMS_WAIT,
        RTEMS_NO_TIMEOUT, &events);
      send_packet(ifp, m);
      m_freem(m);
    }
    ifp->if_flags &= ~IFF_OACTIVE;
  }
}
Example #16
0
void
uether_rxflush(struct usb_ether *ue)
{
	struct ifnet *ifp = uether_getifp(ue);
	struct mbuf *m;

	UE_LOCK_ASSERT(ue);

	for (;;) {
		IF_DEQUEUE(&ue->ue_rxq, m);
		if (m == NULL)
			break;

		/*
		 * The USB xfer has been resubmitted so it's safe to unlock now.
		 */
		UE_UNLOCK(ue);
		ifp->if_input(ifp, m, NULL, -1);
		UE_LOCK(ue);
	}
}
Example #17
0
Static void
cdce_start(struct ifnet *ifp)
{
    struct cdce_softc	*sc;
    struct mbuf		*m_head = NULL;

    sc = ifp->if_softc;
    CDCE_LOCK(sc);

    if (sc->cdce_dying ||
            ifp->if_flags & IFF_OACTIVE ||
            !(ifp->if_flags & IFF_RUNNING)) {
        CDCE_UNLOCK(sc);
        return;
    }

    IF_DEQUEUE(&ifp->if_snd, m_head);
    if (m_head == NULL) {
        CDCE_UNLOCK(sc);
        return;
    }

    if (cdce_encap(sc, m_head, 0)) {
        IF_PREPEND(&ifp->if_snd, m_head);
        ifp->if_flags |= IFF_OACTIVE;
        CDCE_UNLOCK(sc);
        return;
    }

    BPF_MTAP(ifp, m_head);

    ifp->if_flags |= IFF_OACTIVE;

    CDCE_UNLOCK(sc);

    return;
}
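cdce_start shows the common requeue-on-failure pattern: when encapsulation fails, the packet goes back to the head of if_snd with IF_PREPEND and the interface is marked IFF_OACTIVE so the start routine is not re-entered. A minimal hedged sketch of just that dance, with a hypothetical xx_encap() standing in for the driver-specific work:

/*
 * Sketch of the dequeue/encap/requeue pattern shown above.
 * "xx_encap" is a hypothetical driver routine; locking and the
 * success-path bookkeeping are omitted.
 */
static int xx_encap(struct ifnet *, struct mbuf *);

static void
xx_start(struct ifnet *ifp)
{
	struct mbuf *m;

	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
		return;
	if (xx_encap(ifp, m) != 0) {
		/* no resources: put the packet back and stall the queue */
		IF_PREPEND(&ifp->if_snd, m);
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}
	ifp->if_flags |= IFF_OACTIVE;	/* one transfer in flight at a time */
}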
Example #18
0
/*
 * This function shall be called by drivers immediately after every DTIM.
 * Transmit all group addressed MSDUs buffered at the AP.
 */
void
ieee80211_notify_dtim(struct ieee80211com *ic)
{
	/* NB: group addressed MSDUs are buffered in ic_bss */
	struct ieee80211_node *ni = ic->ic_bss;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_frame *wh;
	struct mbuf *m;

	KASSERT(ic->ic_opmode == IEEE80211_M_HOSTAP);

	for (;;) {
		IF_DEQUEUE(&ni->ni_savedq, m);
		if (m == NULL)
			break;
		if (!IF_IS_EMPTY(&ni->ni_savedq)) {
			/* more queued frames, set the more data bit */
			wh = mtod(m, struct ieee80211_frame *);
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		}
		IF_ENQUEUE(&ic->ic_pwrsaveq, m);
		(*ifp->if_start)(ifp);
	}
}
Example #19
0
int
i4btelioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
{
	int error = 0;
        struct mbuf *m;
        int s;
	tel_sc_t *sc = &tel_sc[minor(dev)];

	switch(cmd)
	{
		case I4B_TEL_GETAUDIOFMT:
			*(int *)data = sc->audiofmt;
			break;
		
		case I4B_TEL_SETAUDIOFMT:
			sc->audiofmt = *(int *)data;
			break;

		case I4B_TEL_EMPTYINPUTQUEUE:
			s = splimp();
			while((sc->devstate & ST_CONNECTED)	&&
				(sc->devstate & ST_ISOPEN) 	&&
				!IF_QEMPTY(sc->isdn_linktab->rx_queue))
			{
				IF_DEQUEUE(sc->isdn_linktab->rx_queue, m);
				if(m)
					i4b_Bfreembuf(m);
			}
			splx(s);
			break;

		default:
			error = ENOTTY;
			break;
	}
	return(error);
}
Example #20
0
/*---------------------------------------------------------------------------*
 *	i4bputqueue_hipri - put message into front of queue to userland
 *---------------------------------------------------------------------------*/
void
i4bputqueue_hipri(struct mbuf *m)
{
	int x;

	if(!openflag)
	{
		i4b_Dfreembuf(m);
		return;
	}

	x = splnet();

	if(IF_QFULL(&i4b_rdqueue))
	{
		struct mbuf *m1;
		IF_DEQUEUE(&i4b_rdqueue, m1);
		i4b_Dfreembuf(m1);
		NDBGL4(L4_ERR, "ERROR, queue full, removing entry!");
	}

	IF_PREPEND(&i4b_rdqueue, m);

	splx(x);

	if(readflag)
	{
		readflag = 0;
		wakeup((void *) &i4b_rdqueue);
	}

	if(selflag)
	{
		selflag = 0;
		selnotify(&select_rd_info, 0, 0);
	}
}
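i4bputqueue_hipri makes room by discarding the oldest queued message and then prepending the new one. For contrast, a hedged sketch of a tail-enqueue counterpart that drops the new message instead when the queue is full (illustration only, not copied from the actual i4bputqueue()):

/*
 * Sketch of a tail-enqueue counterpart: when the queue is full the
 * new message is dropped rather than the oldest one.
 * Illustration only, not the actual i4bputqueue().
 */
static void
example_putqueue(struct mbuf *m)
{
	int x;

	x = splnet();
	if (IF_QFULL(&i4b_rdqueue)) {
		IF_DROP(&i4b_rdqueue);	/* account for the drop */
		i4b_Dfreembuf(m);
	} else {
		IF_ENQUEUE(&i4b_rdqueue, m);
	}
	splx(x);
}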
Example #21
0
/*---------------------------------------------------------------------------*
 *	i4bputqueue_hipri - put message into front of queue to userland
 *---------------------------------------------------------------------------*/
void
i4bputqueue_hipri(struct mbuf *m)
{
	int x;
	
	if(!openflag)
	{
		i4b_Dfreembuf(m);
		return;
	}

	x = splimp();
	
	if(IF_QFULL(&i4b_rdqueue))
	{
		struct mbuf *m1;
		IF_DEQUEUE(&i4b_rdqueue, m1);
		i4b_Dfreembuf(m1);
		DBGL4(L4_ERR, "i4bputqueue", ("ERROR, queue full, removing entry!\n"));
	}

	IF_PREPEND(&i4b_rdqueue, m);

	splx(x);	

	if(readflag)
	{
		readflag = 0;
		wakeup((caddr_t) &i4b_rdqueue);
	}

	if(selflag)
	{
		selflag = 0;
		selwakeup(&select_rd_info);
	}
}
Example #22
0
/*
 * Software interrupt routine, called at spl[soft]net.
 */
static void
pppintr(netmsg_t msg)
{
    struct mbuf *m;
    struct ppp_softc *sc;
    struct ifaltq_subque *ifsq;
    int i;

    /*
     * Packets are never sent to this netisr so the message must always
     * be replied.  Interlock processing and notification by replying
     * the message first.
     */
    lwkt_replymsg(&msg->lmsg, 0);

    get_mplock();

    sc = ppp_softc;
    ifsq = ifq_get_subq_default(&sc->sc_if.if_snd);

    for (i = 0; i < NPPP; ++i, ++sc) {
	ifnet_serialize_all(&sc->sc_if);
	if (!(sc->sc_flags & SC_TBUSY)
	    && (!ifsq_is_empty(ifsq) || !IF_QEMPTY(&sc->sc_fastq))) {
	    sc->sc_flags |= SC_TBUSY;
	    (*sc->sc_start)(sc);
	} 
	for (;;) {
	    IF_DEQUEUE(&sc->sc_rawq, m);
	    if (m == NULL)
		break;
	    ppp_inproc(sc, m);
	}
	ifnet_deserialize_all(&sc->sc_if);
    }
    rel_mplock();
}
Example #23
0
void
mplsintr(void)
{

	struct mbuf *m;

	for (;;) {
		IFQ_LOCK(&mplsintrq);
		IF_DEQUEUE(&mplsintrq, m);
		IFQ_UNLOCK(&mplsintrq);

		if (!m)
			return;

		if (((m->m_flags & M_PKTHDR) == 0) ||
		    (m->m_pkthdr.rcvif_index == 0))
			panic("mplsintr(): no pkthdr or rcvif");

#ifdef MBUFTRACE
		m_claimm(m, &mpls_owner);
#endif
		mpls_input(m_get_rcvif_NOMPSAFE(m), m);
	}
}
Example #24
0
void
mplsintr(void)
{
	struct mbuf *m;
	int s;

	while (!IF_IS_EMPTY(&mplsintrq)) {
		s = splnet();
		IF_DEQUEUE(&mplsintrq, m);
		splx(s);

		if (!m)
			return;

		if (((m->m_flags & M_PKTHDR) == 0) ||
		    (m->m_pkthdr.rcvif == 0))
			panic("mplsintr(): no pkthdr or rcvif");

#ifdef MBUFTRACE
		m_claimm(m, &mpls_owner);
#endif
		mpls_input(m->m_pkthdr.rcvif, m);
	}
}
Example #25
0
static void
fwe_start(struct ifnet *ifp)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe;
	int s;

#if 1
	FWEDEBUG("%s%d starting\n", ifp->if_name, ifp->if_unit);

	if (fwe->dma_ch < 0) {
		struct mbuf	*m = NULL;

		FWEDEBUG("%s%d not ready.\n", ifp->if_name, ifp->if_unit);

		s = splimp();
		do {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL)
				m_freem(m);
			ifp->if_oerrors ++;
		} while (m != NULL);
		splx(s);

		return;
	}

#endif
	s = splimp();
	ifp->if_flags |= IFF_OACTIVE;

	if (ifp->if_snd.ifq_len != 0)
		fwe_as_output(fwe, ifp);

	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}
Example #26
0
/*---------------------------------------------------------------------------*
 *	start transmission on a b channel
 *---------------------------------------------------------------------------*/
static void
isic_bchannel_start(int unit, int h_chan)
{
	struct l1_softc *sc = &l1_sc[unit];
	l1_bchan_state_t *chan = &sc->sc_chan[h_chan];
	int next_len;
	int len;
	int activity = -1;
	int cmd = 0;

	crit_enter();
	if(chan->state & HSCX_TX_ACTIVE)	/* already running ? */
	{
		crit_exit();
		return;				/* yes, leave */
	}

	/* get next mbuf from queue */
	
	IF_DEQUEUE(&chan->tx_queue, chan->out_mbuf_head);
	
	if(chan->out_mbuf_head == NULL)		/* queue empty ? */
	{
		crit_exit();
		return;				/* yes, exit */
	}

	/* init current mbuf values */
	
	chan->out_mbuf_cur = chan->out_mbuf_head;
	chan->out_mbuf_cur_len = chan->out_mbuf_cur->m_len;
	chan->out_mbuf_cur_ptr = chan->out_mbuf_cur->m_data;	
	
	/* activity indicator for timeout handling */

	if(chan->bprot == BPROT_NONE)
	{
		if(!(i4b_l1_bchan_tel_silence(chan->out_mbuf_cur->m_data, chan->out_mbuf_cur->m_len)))
			activity = ACT_TX;
	}
	else
	{
		activity = ACT_TX;
	}

	chan->state |= HSCX_TX_ACTIVE;		/* we start transmitting */
	
	if(sc->sc_trace & TRACE_B_TX)	/* if trace, send mbuf to trace dev */
	{
		i4b_trace_hdr_t hdr;
		hdr.unit = L0ISICUNIT(unit);
		hdr.type = (h_chan == HSCX_CH_A ? TRC_CH_B1 : TRC_CH_B2);
		hdr.dir = FROM_TE;
		hdr.count = ++sc->sc_trace_bcount;
		MICROTIME(hdr.time);
		i4b_l1_trace_ind(&hdr, chan->out_mbuf_cur->m_len, chan->out_mbuf_cur->m_data);
	}			

	len = 0;	/* # of chars put into HSCX tx fifo this time */

	/*
	 * fill the HSCX tx fifo with data from the current mbuf. if
	 * current mbuf holds less data than HSCX fifo length, try to
	 * get the next mbuf from (a possible) mbuf chain. if there is
	 * not enough data in a single mbuf or in a chain, then this
	 * is the last mbuf and we tell the HSCX that it has to send
	 * CRC and closing flag
	 */
	 
	while((len < sc->sc_bfifolen) && chan->out_mbuf_cur)
	{
		/*
		 * put as much data into the HSCX fifo as is
		 * available from the current mbuf
		 */
		 
		if((len + chan->out_mbuf_cur_len) >= sc->sc_bfifolen)
			next_len = sc->sc_bfifolen - len;
		else
			next_len = chan->out_mbuf_cur_len;

#ifdef NOTDEF		
		kprintf("b:mh=%x, mc=%x, mcp=%x, mcl=%d l=%d nl=%d # ",
			chan->out_mbuf_head,
			chan->out_mbuf_cur,			
			chan->out_mbuf_cur_ptr,
			chan->out_mbuf_cur_len,
			len,
			next_len);
#endif

		/* wait for tx fifo write enabled */

		isic_hscx_waitxfw(sc, h_chan);

		/* write what we have from current mbuf to HSCX fifo */

		HSCX_WRFIFO(h_chan, chan->out_mbuf_cur_ptr, next_len);

		len += next_len;		/* update # of bytes written */
		chan->txcount += next_len;	/* statistics */
		chan->out_mbuf_cur_ptr += next_len;	/* data ptr */
		chan->out_mbuf_cur_len -= next_len;	/* data len */

		/*
		 * in case the current mbuf (of a possible chain) data
		 * has been put into the fifo, check if there is a next
		 * mbuf in the chain. If there is one, get ptr to it
		 * and update the data ptr and the length
		 */
		 
		if((chan->out_mbuf_cur_len <= 0)	&&
		  ((chan->out_mbuf_cur = chan->out_mbuf_cur->m_next) != NULL))
		{
			chan->out_mbuf_cur_ptr = chan->out_mbuf_cur->m_data;
			chan->out_mbuf_cur_len = chan->out_mbuf_cur->m_len;

			if(sc->sc_trace & TRACE_B_TX)
			{
				i4b_trace_hdr_t hdr;
				hdr.unit = L0ISICUNIT(unit);
				hdr.type = (h_chan == HSCX_CH_A ? TRC_CH_B1 : TRC_CH_B2);
				hdr.dir = FROM_TE;
				hdr.count = ++sc->sc_trace_bcount;
				MICROTIME(hdr.time);
				i4b_l1_trace_ind(&hdr, chan->out_mbuf_cur->m_len, chan->out_mbuf_cur->m_data);
			}			
		}
	}

	/*
	 * if there is either still data in the current mbuf and/or
	 * there is a successor on the chain available issue just
	 * an XTF (transmit) command to HSCX. if there is no more
	 * data available from the current mbuf (-chain), issue
	 * an XTF and an XME (message end) command which will then
	 * send the CRC and the closing HDLC flag sequence
	 */
	 
	if(chan->out_mbuf_cur && (chan->out_mbuf_cur_len > 0))
	{
		/*
		 * more data available, send current fifo out.
		 * next xfer to HSCX tx fifo is done in the
		 * HSCX interrupt routine.
		 */
		 
		cmd |= HSCX_CMDR_XTF;
	}
	else
	{
		/* end of mbuf chain */
	
		if(chan->bprot == BPROT_NONE)
			cmd |= HSCX_CMDR_XTF;
		else
			cmd |= HSCX_CMDR_XTF | HSCX_CMDR_XME;
		
		i4b_Bfreembuf(chan->out_mbuf_head);	/* free mbuf chain */
		
		chan->out_mbuf_head = NULL;
		chan->out_mbuf_cur = NULL;			
		chan->out_mbuf_cur_ptr = NULL;
		chan->out_mbuf_cur_len = 0;
	}

	/* call timeout handling routine */
	
	if(activity == ACT_RX || activity == ACT_TX)
		(*chan->isic_drvr_linktab->bch_activity)(chan->isic_drvr_linktab->unit, activity);

	if(cmd)
		isic_hscx_cmd(sc, h_chan, cmd);
		
	crit_exit();
}
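Stripped of FIFO flow control and tracing, the transmit path above amounts to dequeuing one mbuf chain and walking it segment by segment. A minimal hedged sketch of that walk, with a hypothetical example_copy_seg() in place of the HSCX FIFO writes:

/*
 * Sketch of the chain walk performed above.  "example_copy_seg" is a
 * hypothetical output routine standing in for the HSCX FIFO writes;
 * FIFO space checks, XTF/XME commands and tracing are omitted.
 */
static void example_copy_seg(const u_char *, int);

static void
example_tx_chain(struct ifqueue *txq)
{
	struct mbuf *head, *m;

	IF_DEQUEUE(txq, head);
	if (head == NULL)
		return;
	for (m = head; m != NULL; m = m->m_next)
		example_copy_seg(mtod(m, const u_char *), m->m_len);
	m_freem(head);		/* frees the whole chain */
}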
Example #27
0
/*
 * Get a packet to send.  This procedure is intended to be called at
 * splsoftnet, since it may involve time-consuming operations such as
 * applying VJ compression, packet compression, address/control and/or
 * protocol field compression to the packet.
 */
struct mbuf *
ppp_dequeue(struct ppp_softc *sc)
{
    struct mbuf *m, *mp;
    u_char *cp;
    int address, control, protocol;

    /*
     * Grab a packet to send: first try the fast queue, then the
     * normal queue.
     */
    IF_DEQUEUE(&sc->sc_fastq, m);
    if (m == NULL)
	m = ifsq_dequeue(ifq_get_subq_default(&sc->sc_if.if_snd));
    if (m == NULL)
	return NULL;

    ++sc->sc_stats.ppp_opackets;

    /*
     * Extract the ppp header of the new packet.
     * The ppp header will be in one mbuf.
     */
    cp = mtod(m, u_char *);
    address = PPP_ADDRESS(cp);
    control = PPP_CONTROL(cp);
    protocol = PPP_PROTOCOL(cp);

    switch (protocol) {
    case PPP_IP:
#ifdef VJC
	/*
	 * If the packet is a TCP/IP packet, see if we can compress it.
	 */
	if ((sc->sc_flags & SC_COMP_TCP) && sc->sc_comp != NULL) {
	    struct ip *ip;
	    int type;

	    mp = m;
	    ip = (struct ip *) (cp + PPP_HDRLEN);
	    if (mp->m_len <= PPP_HDRLEN) {
		mp = mp->m_next;
		if (mp == NULL)
		    break;
		ip = mtod(mp, struct ip *);
	    }
	    /* this code assumes the IP/TCP header is in one non-shared mbuf */
	    if (ip->ip_p == IPPROTO_TCP) {
		type = sl_compress_tcp(mp, ip, sc->sc_comp,
				       !(sc->sc_flags & SC_NO_TCP_CCID));
		switch (type) {
		case TYPE_UNCOMPRESSED_TCP:
		    protocol = PPP_VJC_UNCOMP;
		    break;
		case TYPE_COMPRESSED_TCP:
		    protocol = PPP_VJC_COMP;
		    cp = mtod(m, u_char *);
		    cp[0] = address;	/* header has moved */
		    cp[1] = control;
		    cp[2] = 0;
		    break;
		}
		cp[3] = protocol;	/* update protocol in PPP header */
	    }
	}
Example #28
0
/*
 * IPX input routine.  Pass to next level.
 */
void
ipxintr()
{
    register struct ipx *ipx;
    register struct mbuf *m;
    register struct ipxpcb *ipxp;
    struct ipx_ifaddr *ia;
    register int i;
    int len, s;
    char oddshortpacket = 0;

next:
    /*
     * Get next datagram off input queue and get IPX header
     * in first mbuf.
     */
    s = splimp();
    IF_DEQUEUE(&ipxintrq, m);
    splx(s);
    if (m == NULL)
        return;
    /*
     * If no IPX addresses have been set yet but the interfaces
     * are receiving, can't do anything with incoming packets yet.
     */
    if (ipx_ifaddr == NULL)
        goto bad;

    ipxstat.ipxs_total++;

    if ((m->m_flags & M_EXT || m->m_len < sizeof(struct ipx)) &&
            (m = m_pullup(m, sizeof(struct ipx))) == 0) {
        ipxstat.ipxs_toosmall++;
        goto next;
    }

    /*
     * Give any raw listeners a crack at the packet
     */
    for (ipxp = ipxrawpcb.ipxp_next; ipxp != &ipxrawpcb;
            ipxp = ipxp->ipxp_next) {
        struct mbuf *m1 = m_copy(m, 0, (int)M_COPYALL);
        if (m1 != NULL)
            ipx_input(m1, ipxp);
    }

    ipx = mtod(m, struct ipx *);
    len = ntohs(ipx->ipx_len);
    if ((len < m->m_pkthdr.len) && (oddshortpacket = len & 1)) {
        /*
         * If this packet is of odd length, and the length
         * inside the header is less than the received packet
         * length, preserve garbage byte for possible checksum.
         */
        len++;
    }

    /*
     * Check that the amount of data in the buffers
     * is as at least much as the IPX header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (m->m_pkthdr.len < len) {
        ipxstat.ipxs_tooshort++;
        goto bad;
    }
    if (m->m_pkthdr.len > len) {
        if (m->m_len == m->m_pkthdr.len) {
            m->m_len = len;
            m->m_pkthdr.len = len;
        } else
            m_adj(m, len - m->m_pkthdr.len);
    }
    if (ipxcksum && ((i = ipx->ipx_sum) != 0xffff)) {
        ipx->ipx_sum = 0;
        if (i != (ipx->ipx_sum = ipx_cksum(m, len))) {
            ipxstat.ipxs_badsum++;
            goto bad;
        }
    }

    /*
     * Propagated (Netbios) packets (type 20) have to be handled
     * differently. :-(
     */
    if (ipx->ipx_pt == IPXPROTO_NETBIOS) {
        if (ipxnetbios) {
            ipx_output_type20(m);
            goto next;
        } else
            goto bad;
    }

    /*
     * Is this a directed broadcast?
     */
    if (ipx_hosteqnh(ipx_broadhost,ipx->ipx_dna.x_host)) {
        if ((!ipx_neteq(ipx->ipx_dna, ipx->ipx_sna)) &&
                (!ipx_neteqnn(ipx->ipx_dna.x_net, ipx_broadnet)) &&
                (!ipx_neteqnn(ipx->ipx_sna.x_net, ipx_zeronet)) &&
                (!ipx_neteqnn(ipx->ipx_dna.x_net, ipx_zeronet)) ) {
            /*
             * If it is a broadcast to the net where it was
             * received from, treat it as ours.
             */
            for (ia = ipx_ifaddr; ia != NULL; ia = ia->ia_next)
                if((ia->ia_ifa.ifa_ifp == m->m_pkthdr.rcvif) &&
                        ipx_neteq(ia->ia_addr.sipx_addr,
                                  ipx->ipx_dna))
                    goto ours;

            /*
             * Look to see if I need to eat this packet.
             * Algorithm is to forward all young packets
             * and prematurely age any packets which will
             * be physically broadcast.
             * Any very old packets eaten without forwarding
             * would die anyway.
             *
             * Suggestion of Bill Nesheim, Cornell U.
             */
            if (ipx->ipx_tc < IPX_MAXHOPS) {
                ipx_forward(m);
                goto next;
            }
        }
        /*
         * Is this our packet? If not, forward.
         */
    } else {
        for (ia = ipx_ifaddr; ia != NULL; ia = ia->ia_next)
            if (ipx_hosteq(ipx->ipx_dna, ia->ia_addr.sipx_addr) &&
                    (ipx_neteq(ipx->ipx_dna, ia->ia_addr.sipx_addr) ||
                     ipx_neteqnn(ipx->ipx_dna.x_net, ipx_zeronet)))
                break;

        if (ia == NULL) {
            ipx_forward(m);
            goto next;
        }
    }
ours:
    /*
     * Locate pcb for datagram.
     */
    ipxp = ipx_pcblookup(&ipx->ipx_sna, ipx->ipx_dna.x_port, IPX_WILDCARD);
    /*
     * Switch out to protocol's input routine.
     */
    if (ipxp != NULL) {
        if (oddshortpacket) {
            m_adj(m, -1);
        }
        ipxstat.ipxs_delivered++;
        if ((ipxp->ipxp_flags & IPXP_ALL_PACKETS) == 0)
            switch (ipx->ipx_pt) {

            case IPXPROTO_SPX:
                spx_input(m, ipxp);
                goto next;
            }
        ipx_input(m, ipxp);
    } else
        goto bad;

    goto next;

bad:
    m_freem(m);
    goto next;
}
Example #29
0
/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
	 * until we drain the queues, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		vlan++;
		if (vlan == SW_DEVS)
			vlan = 0;
		i = vlan;
		for (;;) {
			ifp = sc->sc_ifnet[i];
			if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) 
			    == IFF_DRV_RUNNING) {
				/* Grab a packet off the queue. */
				IF_DEQUEUE(&ifp->if_snd, m0);
				if (m0 != NULL)
					break;
			}
			i++;
			if (i == SW_DEVS)
				i = 0;
			if (i == vlan)
				return;
		}
		vlan = i;
		m = NULL;

		/* Get a spare descriptor. */
		if (sc->sc_txfree == 0) {
			/* No more slots left; notify upper layer. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		nexttx = sc->sc_txnext;
		desc = &sc->sc_txldescs[nexttx];
		ds = &sc->sc_txlsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m0,
		    admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m == NULL) {
				device_printf(sc->sc_dev, 
				    "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_NOWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					device_printf(sc->sc_dev, 
					    "unable to allocate Tx cluster\n");
					m_freem(m);
					break;
				}
			}
			m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (m->m_pkthdr.len < ETHER_MIN_LEN) {
				if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
					panic("admsw_start: M_TRAILINGSPACE\n");
				memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_bufs_dmat, 
			    dmamap, m, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT);
			if (error) {
				device_printf(sc->sc_dev, 
				    "unable to load Tx buffer, error = %d\n", 
				    error);
				break;
			}
		}

		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_bufs_dmat, dmamap, BUS_DMASYNC_PREWRITE);

		if (ds->ds_nsegs != 1 && ds->ds_nsegs != 2)
			panic("admsw_start: nsegs == %d\n", ds->ds_nsegs);
		desc->data = ds->ds_addr[0];
		desc->len = len = ds->ds_len[0];
		if (ds->ds_nsegs > 1) {
			len += ds->ds_len[1];
			desc->cntl = ds->ds_addr[1] | ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & CSUM_IP)
			desc->status |= ADM5120_DMA_CSUM;
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n",nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

		/* Pass the packet to any BPF listeners. */
		BPF_MTAP(ifp, m0);

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_timer = 5;
	}
}
Example #30
0
/* Async. stream output */
static void
fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp)
{
	struct mbuf *m;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	int i = 0;

	xfer = NULL;
	xferq = fwe->fd.fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
			(ifp->if_snd.ifq_head != NULL)) {
		FWE_LOCK(fwe);
		xfer = STAILQ_FIRST(&fwe->xferlist);
		if (xfer == NULL) {
#if 0
			printf("if_fwe: lack of xfer\n");
#endif
			FWE_UNLOCK(fwe);
			break;
		}
		STAILQ_REMOVE_HEAD(&fwe->xferlist, link);
		FWE_UNLOCK(fwe);

		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			FWE_LOCK(fwe);
			STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
			FWE_UNLOCK(fwe);
			break;
		}
#if defined(__DragonFly__) || __FreeBSD_version < 500000
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);
#else
		BPF_MTAP(ifp, m);
#endif

		/* keep ip packet alignment for alpha */
		M_PREPEND(m, ETHER_ALIGN, M_NOWAIT);
		fp = &xfer->send.hdr;
		*(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr;
		fp->mode.stream.len = m->m_pkthdr.len;
		xfer->mbuf = m;
		xfer->send.pay_len = m->m_pkthdr.len;

		if (fw_asyreq(fwe->fd.fc, -1, xfer) != 0) {
			/* error */
			ifp->if_oerrors ++;
			/* XXX set error code */
			fwe_output_callback(xfer);
		} else {
			ifp->if_opackets ++;
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fwe->fd.fc);
}