/*---------------------------------------------------------------------------*
 *	routine INVOKE RETRANSMISSION (Q.921 03/93 page 84)
 *---------------------------------------------------------------------------*/
void
i4b_invoke_retransmission(l2_softc_t *l2sc, int nr)
{
	DBGL2(L2_ERROR, "i4b_invoke_retransmission", ("nr = %d\n", nr ));

	/*
	 * Walk V(S) back down to the peer's N(R), re-queueing every
	 * outstanding I-frame for retransmission.
	 */
	while(l2sc->vs != nr)
	{
		DBGL2(L2_ERROR, "i4b_invoke_retransmission",
		      ("nr != vs, nr = %d, vs = %d\n", nr, l2sc->vs));

		M128DEC(l2sc->vs);

		if((l2sc->ua_num != UA_EMPTY) && (l2sc->vs == l2sc->ua_num))
		{
			/*
			 * BUG FIX: the original enqueued unconditionally;
			 * guard against a full I-queue so the saved frame
			 * is not pushed onto an overflowing queue (same
			 * guard the later revision of this routine uses).
			 */
			if(IF_QFULL(&l2sc->i_queue))
			{
				DBGL2(L2_ERROR, "i4b_invoke_retransmission",
				      ("ERROR, I-queue full!\n"));
			}
			else
			{
				IF_ENQUEUE(&l2sc->i_queue, l2sc->ua_frame);
				l2sc->ua_num = UA_EMPTY;
			}
		}
		else
		{
			DBGL2(L2_ERROR, "i4b_invoke_retransmission",
			      ("ERROR, l2sc->vs = %d, l2sc->ua_num = %d \n",
			       l2sc->vs, l2sc->ua_num));
		}

		i4b_i_frame_queued_up(l2sc);
	}
}
/*
 * uether_rxbuf - copy a received USB frame out of the transfer's page
 * cache into a fresh mbuf and put it on the driver rx queue.
 *
 * Returns 0 on success, 1 when the length is not a plausible Ethernet
 * frame size (NOTE(review): inconsistent with the ENOMEM errno used
 * for the allocation-failure path - confirm callers only test != 0),
 * or ENOMEM when no mbuf could be allocated.
 * Must be called with the usb_ether lock held.
 */
int
uether_rxbuf(struct usb_ether *ue, struct usb_page_cache *pc,
    unsigned int offset, unsigned int len)
{
	struct ifnet *ifp = uether_getifp(ue);
	struct mbuf *m;

	UE_LOCK_ASSERT(ue);

	/* reject runts and frames too large for one cluster */
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN)
		return (1);

	m = uether_newbuf();
	if (m == NULL) {
		IFNET_STAT_INC(ifp, iqdrops, 1);
		return (ENOMEM);
	}

	/* copy the payload out of the USB DMA page cache */
	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	/* finalize mbuf */
	IFNET_STAT_INC(ifp, ipackets, 1);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len;

	/* enqueue for later when the lock can be released */
	IF_ENQUEUE(&ue->ue_rxq, m);
	return (0);
}
/*
 * ifpsend - hand one mbuf to an interface's send queue and kick the
 * transmitter.  Runs at splhigh() while the queue is manipulated.
 *
 * BUG FIX: the original declared no return type (implicit int) and
 * enqueued unconditionally, letting if_snd grow without bound when
 * the interface stalls.  Full queues now drop (and free) the packet.
 */
void
ifpsend(struct ifnet *ifp, struct mbuf *m)
{
	long s = splhigh();

	if (IF_QFULL(&ifp->if_snd)) {
		IF_DROP(&ifp->if_snd);
		m_freem(m);
	} else {
		IF_ENQUEUE(&ifp->if_snd, m);
		(*ifp->if_start)(ifp);
	}
	splx(s);
}
/*
 * iavc_release - send the board a SEND_RELEASE command message,
 * releasing CAPI application 'applid'.
 *
 * Returns 0 on success or ENOMEM if no mbuf was available.
 * NOTE(review): unlike iavc_send_init(), the tx-queue enqueue here is
 * not protected by splnet - confirm every caller runs at suitable IPL.
 */
int
iavc_release(capi_softc_t *capi_sc, int applid)
{
	iavc_softc_t *sc = (iavc_softc_t*) capi_sc->ctx;
	struct mbuf *m = i4b_Dgetmbuf(7);
	u_int8_t *p;

	if (!m) {
		aprint_error_dev(&sc->sc_dev, "can't get memory\n");
		return (ENOMEM);
	}

	/*
	 * Message layout:
	 *  byte  0x14 = SEND_RELEASE
	 *  dword ApplId
	 */

	p = amcc_put_byte(mtod(m, u_int8_t*), 0);
	p = amcc_put_byte(p, 0);
	p = amcc_put_byte(p, SEND_RELEASE);
	p = amcc_put_word(p, applid);

	IF_ENQUEUE(&sc->sc_txq, m);

	iavc_start_tx(sc);

	return 0;
}
/*
 * iavc_send - queue a fully built CAPI message for transmission to
 * the board.
 *
 * Frees the message and returns ENXIO if the board is not up yet.
 * When the tx queue is full the message is dropped and freed, but 0
 * is still returned.  NOTE(review): callers therefore cannot detect
 * a drop - confirm this best-effort behaviour is intended.
 */
int
iavc_send(capi_softc_t *capi_sc, struct mbuf *m)
{
	iavc_softc_t *sc = (iavc_softc_t*) capi_sc->ctx;

	if (sc->sc_state != IAVC_UP) {
		aprint_error_dev(&sc->sc_dev, "attempt to send before device up\n");
		/* m_next carries B3 data and is freed via the B-chan pool */
		if (m->m_next)
			i4b_Bfreembuf(m->m_next);
		i4b_Dfreembuf(m);
		return (ENXIO);
	}

	if (IF_QFULL(&sc->sc_txq)) {
		IF_DROP(&sc->sc_txq);
		aprint_error_dev(&sc->sc_dev, "tx overflow, message dropped\n");
		if (m->m_next)
			i4b_Bfreembuf(m->m_next);
		i4b_Dfreembuf(m);
	} else {
		IF_ENQUEUE(&sc->sc_txq, m);
		iavc_start_tx(sc);
	}

	return 0;
}
/*
 * iavc_send_init - send the SEND_INIT bootstrap command to the board
 * and advance the driver state machine to IAVC_INIT.
 *
 * Returns 0 on success or ENOMEM if no mbuf was available.  The
 * enqueue and the state transition run at splnet so they are atomic
 * with respect to the tx interrupt path.
 */
static int
iavc_send_init(iavc_softc_t *sc)
{
	struct mbuf *m = i4b_Dgetmbuf(15);
	u_int8_t *p;
	int s;

	if (!m) {
		aprint_error_dev(&sc->sc_dev, "can't get memory\n");
		return (ENOMEM);
	}

	/*
	 * Message layout:
	 *  byte  0x11 = SEND_INIT
	 *  dword NumApplications
	 *  dword NumNCCIs
	 *  dword BoardNumber
	 */

	p = amcc_put_byte(mtod(m, u_int8_t*), 0);
	p = amcc_put_byte(p, 0);
	p = amcc_put_byte(p, SEND_INIT);
	p = amcc_put_word(p, 1); /* XXX MaxAppl XXX */
	p = amcc_put_word(p, sc->sc_capi.sc_nbch);
	p = amcc_put_word(p, sc->sc_unit);

	s = splnet();
	IF_ENQUEUE(&sc->sc_txq, m);
	iavc_start_tx(sc);

	sc->sc_state = IAVC_INIT;
	splx(s);

	return 0;
}
/*---------------------------------------------------------------------------* * i4bputqueue - put message into queue to userland *---------------------------------------------------------------------------*/ void i4bputqueue(struct mbuf *m) { if(!openflag) { i4b_Dfreembuf(m); return; } crit_enter(); if(IF_QFULL(&i4b_rdqueue)) { struct mbuf *m1; IF_DEQUEUE(&i4b_rdqueue, m1); i4b_Dfreembuf(m1); NDBGL4(L4_ERR, "ERROR, queue full, removing entry!"); } IF_ENQUEUE(&i4b_rdqueue, m); crit_exit(); if(readflag) { readflag = 0; wakeup((caddr_t) &i4b_rdqueue); } KNOTE(&kq_rd_info.ki_note, 0); }
/*
 * EcosSendToEth - look up a network interface by its device name and
 * transmit a raw frame of 'len' bytes from 'buf' on it.
 */
void EcosSendToEth(char* iface, char* buf, size_t len)
{
	cyg_netdevtab_entry_t *t;
	struct eth_drv_sc *sc = NULL;
	struct ifnet *ifp;
	int found = 0;
	struct mbuf *m = NULL;

	/* find the driver instance matching the interface name */
	for (t = &__NETDEVTAB__[0]; t != &__NETDEVTAB_END__; t++) {
		sc = (struct eth_drv_sc *)t->device_instance;
		if (strcmp(sc->dev_name, iface) == 0) {
			found = 1;
			break;
		}
	}

	if (found != 1) {
		diag_printf("not find %s\n", iface);
		return;
	}

	diag_printf("find %s\n", iface);
	ifp = &sc->sc_arpcom.ac_if;

	m = Ecos_MemPool_Alloc(24, MemPool_TYPE_CLUSTER);
	if (m == NULL) {
		diag_printf("m == NULL %s\n", iface);
		return;
	}

	m->m_pkthdr.rcvif = ifp;
	/* NOTE(review): the mbuf points directly at the caller's buffer;
	 * 'buf' must stay valid until transmission completes - confirm. */
	m->m_data = buf;
	m->m_pkthdr.len = len;
	m->m_len = m->m_pkthdr.len;

	if (IF_QFULL(&ifp->if_snd)) {
		// Let the interface try a dequeue anyway, in case the
		// interface has "got better" from whatever made the queue
		// fill up - being unplugged for example.
		if ((ifp->if_flags & IFF_OACTIVE) == 0)
			(*ifp->if_start)(ifp);
		IF_DROP(&ifp->if_snd);
		/*
		 * BUG FIX: the original fell through and enqueued onto the
		 * full queue anyway (the senderr(ENOBUFS) was commented
		 * out).  Drop and free the mbuf instead.
		 */
		m_freem(m);
		return;
	}

	ifp->if_obytes += m->m_pkthdr.len;
	IF_ENQUEUE(&ifp->if_snd, m);
	if (m->m_flags & M_MCAST)
		ifp->if_omcasts++;

	if ((ifp->if_flags & IFF_OACTIVE) == 0)
		(*ifp->if_start)(ifp);
}
/* ARGSUSED */
/*
 * pppopen - attach the PPP line discipline to a termios tty.
 *
 * If the tty is already bound to a softc, succeeds immediately.
 * Otherwise allocates a unit, initializes the async-HDLC framing
 * state, and preallocates NUM_MBUFQ mbufs for the receive free queue
 * so the rx interrupt handler never has to allocate.
 *
 * Returns RTEMS_SUCCESSFUL (0) on success, ENXIO if no unit is free.
 */
int
pppopen(struct rtems_termios_tty *tty)
{
	int i;
	register struct ppp_softc *sc;
	struct mbuf *m = (struct mbuf *)0;

	/* already open and bound to this tty? */
	if (tty->t_line == PPPDISC) {
		sc = (struct ppp_softc *)tty->t_sc;
		if (sc != NULL && sc->sc_devp == (void *)tty) {
			return (0);
		}
	}

	if ((sc = pppalloc(1)) == NULL) {
		return ENXIO;
	}

	if (sc->sc_relinq)
		(*sc->sc_relinq)(sc);	/* get previous owner to relinquish the unit */

	sc->sc_ilen = 0;
	sc->sc_m = NULL;
	bzero(sc->sc_asyncmap, sizeof(sc->sc_asyncmap));
	/* default async control-character escape map */
	sc->sc_asyncmap[0] = 0xffffffff;
	sc->sc_asyncmap[3] = 0x60000000;
	sc->sc_rasyncmap = 0;
	sc->sc_devp = tty;
	sc->sc_start = pppasyncstart;
	sc->sc_ctlp = pppasyncctlp;
	sc->sc_relinq = pppasyncrelinq;
	sc->sc_outm = NULL;
	sc->sc_outmc = NULL;

	/* preallocate mbufs for free queue */
	rtems_bsdnet_semaphore_obtain();
	for (i=0; i<NUM_MBUFQ; i++) {
		pppallocmbuf(sc, &m);
		if ( i == 0 ) {
			/* use first mbuf for rx iterrupt handling */
			sc->sc_m = m;
		}
		else {
			/* enqueue mbuf for later use */
			IF_ENQUEUE(&sc->sc_freeq, m);
		}
		m = (struct mbuf *)0;
	}
	rtems_bsdnet_semaphore_release();

	/* initialize values */
	sc->sc_if.if_flags |= IFF_RUNNING;
	sc->sc_if.if_baudrate =
		rtems_termios_baud_to_number(tty->termios.c_cflag & CBAUD);

	tty->t_sc = (void *)sc;

	return ( RTEMS_SUCCESSFUL );
}
/*
 * After a change in the NPmode for some NP, move packets from the
 * npqueue to the send queue or the fast queue as appropriate.
 * Should be called at splsoftnet.
 */
static void
ppp_requeue(struct ppp_softc *sc)
{
	struct mbuf *m, **mpp;
	struct ifqueue *ifq;
	enum NPmode mode;
	int error;

	splsoftassert(IPL_SOFTNET);

	for (mpp = &sc->sc_npqueue; (m = *mpp) != NULL; ) {
		/* classify the queued frame by its PPP protocol field */
		switch (PPP_PROTOCOL(mtod(m, u_char *))) {
		case PPP_IP:
			mode = sc->sc_npmode[NP_IP];
			break;
		default:
			mode = NPMODE_PASS;
		}

		switch (mode) {
		case NPMODE_PASS:
			/*
			 * This packet can now go on one of the queues to be sent.
			 */
			*mpp = m->m_nextpkt;
			m->m_nextpkt = NULL;
			if (m->m_flags & M_HIGHPRI) {
				/* high priority frames use the fast queue */
				ifq = &sc->sc_fastq;
				if (IF_QFULL(ifq)) {
					IF_DROP(ifq);
					m_freem(m);
					error = ENOBUFS;
				} else {
					IF_ENQUEUE(ifq, m);
					error = 0;
				}
			} else
				IFQ_ENQUEUE(&sc->sc_if.if_snd, m, NULL, error);
			if (error) {
				sc->sc_if.if_oerrors++;
				sc->sc_stats.ppp_oerrors++;
			}
			break;
		case NPMODE_DROP:
		case NPMODE_ERROR:
			/* unlink from the npqueue and discard */
			*mpp = m->m_nextpkt;
			m_freem(m);
			break;
		case NPMODE_QUEUE:
			/* leave queued; just advance past this packet */
			mpp = &m->m_nextpkt;
			break;
		}
	}
	sc->sc_npqtail = mpp;
}
/* * After a change in the NPmode for some NP, move packets from the * npqueue to the send queue or the fast queue as appropriate. * Should be called at spl[soft]net. */ static void ppp_requeue(struct ppp_softc *sc) { struct mbuf *m, **mpp; struct ifqueue *ifq; struct ifaltq_subque *ifsq; enum NPmode mode; int error; ifsq = ifq_get_subq_default(&sc->sc_if.if_snd); for (mpp = &sc->sc_npqueue; (m = *mpp) != NULL; ) { switch (PPP_PROTOCOL(mtod(m, u_char *))) { case PPP_IP: mode = sc->sc_npmode[NP_IP]; break; default: mode = NPMODE_PASS; } switch (mode) { case NPMODE_PASS: /* * This packet can now go on one of the queues to be sent. */ *mpp = m->m_nextpkt; m->m_nextpkt = NULL; if ((m->m_flags & M_HIGHPRI) && !ifq_is_enabled(&sc->sc_if.if_snd)) { ifq = &sc->sc_fastq; if (IF_QFULL(ifq)) { IF_DROP(ifq); error = ENOBUFS; } else { IF_ENQUEUE(ifq, m); error = 0; } } else { error = ifsq_enqueue(ifsq, m, NULL); } if (error) { IFNET_STAT_INC(&sc->sc_if, oerrors, 1); sc->sc_stats.ppp_oerrors++; } break; case NPMODE_DROP: case NPMODE_ERROR: *mpp = m->m_nextpkt; m_freem(m); break; case NPMODE_QUEUE: mpp = &m->m_nextpkt; break; } } sc->sc_npqtail = mpp; }
/*
 * ppp_rxdaemon - RTEMS task that drains the raw receive queue.
 *
 * Waits for RX_PACKET/RX_MBUF/RX_EMPTY events posted by the interrupt
 * path, runs each raw frame through ppp_inproc(), and always returns
 * one mbuf to the free queue so the rx interrupt never runs dry.
 */
static rtems_task ppp_rxdaemon(rtems_task_argument arg)
{
	rtems_event_set		events;
	rtems_interrupt_level	level;
	struct ppp_softc	*sc = (struct ppp_softc *)arg;
	struct mbuf		*mp = (struct mbuf *)0;
	struct mbuf		*m;

	/* enter processing loop */
	while ( 1 ) {
		/* wait for event */
		rtems_event_receive(RX_PACKET|RX_MBUF|RX_EMPTY,RTEMS_WAIT|RTEMS_EVENT_ANY,RTEMS_NO_TIMEOUT,&events);
		if ( events & RX_EMPTY ) {
			printf("RX: QUEUE is EMPTY\n");
			events &= ~RX_EMPTY;
		}

		if ( events ) {
			/* get the network semaphore */
			rtems_bsdnet_semaphore_obtain();

			/* check to see if new packet was received */
			if ( events & RX_PACKET ) {
				/* get received packet mbuf chain */
				rtems_interrupt_disable(level);
				IF_DEQUEUE(&sc->sc_rawq, m);
				rtems_interrupt_enable(level);

				/* ensure packet was retrieved */
				if ( m != (struct mbuf *)0 ) {
					/* process the received packet */
					mp = ppp_inproc(sc, m);
				}
			}

			/* allocate a new mbuf to replace one */
			if ( mp == NULL ) {
				pppallocmbuf(sc, &mp);
			}

			/* place mbuf on freeq */
			rtems_interrupt_disable(level);
			IF_ENQUEUE(&sc->sc_freeq, mp);
			rtems_interrupt_enable(level);
			mp = (struct mbuf *)0;

			/* release the network semaphore */
			rtems_bsdnet_semaphore_release();

			/* check to see if queue is empty */
			if ( sc->sc_rawq.ifq_head ) {
				/* queue is not empty - post another event */
				rtems_event_send(sc->sc_rxtask, RX_PACKET);
			}
		}
	}
}
/*
 * enqueue - append an mbuf to an interface queue, dropping (and
 * freeing) the packet when the queue is already full.  Interrupts are
 * blocked via splimp() while the queue is manipulated.
 */
void
enqueue(struct ifqueue *inq, struct mbuf *m)
{
	int spl_save = splimp();

	if (!IF_QFULL(inq)) {
		IF_ENQUEUE(inq, m);
	} else {
		IF_DROP(inq);
		m_freem(m);
	}
	splx(spl_save);
}
/*
 * ubt_xmit_cmd - queue an HCI command mbuf for transmission on the
 * USB Bluetooth device and start the transmitter if it is idle.
 */
void
ubt_xmit_cmd(struct device *self, struct mbuf *m)
{
	struct ubt_softc *softc = device_get_softc(self);

	KKASSERT(softc->sc_enabled);

	crit_enter();

	IF_ENQUEUE(&softc->sc_cmd_queue, m);

	/* kick the pipeline only when no command transfer is in flight */
	if (!softc->sc_cmd_busy)
		ubt_xmit_cmd_start(softc);

	crit_exit();
}
/*
 * uether_rxmbuf - account for and stage a received mbuf on the
 * usb_ether rx queue.  Must be called with the usb_ether lock held;
 * the queue is drained once the lock can be released.  Returns 0.
 */
int
uether_rxmbuf(struct usb_ether *ue, struct mbuf *m, unsigned int len)
{
	struct ifnet *ifp;

	UE_LOCK_ASSERT(ue);

	ifp = uether_getifp(ue);
	IFNET_STAT_INC(ifp, ipackets, 1);

	/* finalize mbuf: stamp receiving interface and lengths */
	m->m_pkthdr.rcvif = ifp;
	m->m_len = len;
	m->m_pkthdr.len = len;

	IF_ENQUEUE(&ue->ue_rxq, m);
	return (0);
}
/*---------------------------------------------------------------------------*
 *	L2 -> L1: PH-DATA-REQUEST (D-Channel)
 *
 *	NOTE: We may get called here from ihfc_hdlc_Dread or isac_hdlc_Dread
 *	      via the upper layers.
 *
 *	Returns 1 if the frame was queued for transmission, 0 otherwise
 *	(NULL mbuf, queue full, or copy failure).
 *---------------------------------------------------------------------------*/
static int
ihfc_ph_data_req(int unit, struct mbuf *m, int freeflag)
{
	ihfc_sc_t *sc = &ihfc_softc[unit];
	u_char chan = 0;
	HFC_VAR;

	if (!m)
		return 0;

	HFC_BEG;

	/* bring up layer 1 first if it is not active yet */
	if(S_PHSTATE != 3)
	{
		NDBGL1(L1_PRIM,
		       "L1 was not running: "
		       "ihfc_ph_activate_req(unit = %d)!", unit);

		ihfc_ph_activate_req(unit);
	}

	/* "Allow" I-frames (-hp) */
	if (freeflag == MBUF_DONTFREE)
		m = m_copypacket(m, M_DONTWAIT);

	/* enqueue only if there is room (and any copy above succeeded) */
	if (!_IF_QFULL(&S_IFQUEUE) && m)
	{
		IF_ENQUEUE(&S_IFQUEUE, m);

		ihfc_B_start(unit, chan);	/* (recycling) */
	}
	else
	{
		NDBGL1(L1_ERROR, "No frame out (unit = %d)", unit);

		if (m)
			i4b_Dfreembuf(m);

		HFC_END;
		return 0;
	}

	if (S_INTR_ACTIVE)
		S_INT_S1 |= 0x04;

	HFC_END;
	return 1;
}
/*---------------------------------------------------------------------------*
 *	routine INVOKE RETRANSMISSION (Q.921 03/93 page 84)
 *---------------------------------------------------------------------------*/
void
i4b_invoke_retransmission(l2_softc_t *l2sc, int nr)
{
	CRIT_VAR;

	CRIT_BEG;

	NDBGL2(L2_ERROR, "nr = %d", nr );

	/*
	 * Walk V(S) back down to the peer's N(R), putting each
	 * outstanding I-frame back on the I-queue for retransmission.
	 */
	while(l2sc->vs != nr)
	{
		NDBGL2(L2_ERROR, "nr(%d) != vs(%d)", nr, l2sc->vs);

		M128DEC(l2sc->vs);

		/* XXXXXXXXXXXXXXXXX */

		if((l2sc->ua_num != UA_EMPTY) && (l2sc->vs == l2sc->ua_num))
		{
			/* drop rather than overflow the I-queue */
			if(_IF_QFULL(&l2sc->i_queue))
			{
				NDBGL2(L2_ERROR, "ERROR, I-queue full!");
			}
			else
			{
				IF_ENQUEUE(&l2sc->i_queue, l2sc->ua_frame);
				l2sc->ua_num = UA_EMPTY;
			}
		}
		else
		{
			NDBGL2(L2_ERROR, "ERROR, l2sc->vs = %d, l2sc->ua_num = %d ",l2sc->vs, l2sc->ua_num);
		}

		/* XXXXXXXXXXXXXXXXX */

		i4b_i_frame_queued_up(l2sc);
	}
	CRIT_END;
}
/*
 * iavc_register - register CAPI application 'applid' (with room for
 * 'nchan' B3 connections) with the board via a SEND_REGISTER command
 * message.
 *
 * Returns 0 on success or ENOMEM if no mbuf was available.
 */
int
iavc_register(capi_softc_t *capi_sc, int applid, int nchan)
{
	iavc_softc_t *sc = (iavc_softc_t*) capi_sc->ctx;
	struct mbuf *m = i4b_Dgetmbuf(23);
	u_int8_t *p;

	if (!m) {
		aprint_error("iavc%d: can't get memory\n", sc->sc_unit);
		return (ENOMEM);
	}

	/*
	 * Message layout:
	 *  byte  0x12 = SEND_REGISTER
	 *  dword ApplId
	 *  dword NumMessages
	 *  dword NumB3Connections 0..nbch
	 *  dword NumB3Blocks
	 *  dword B3Size
	 */

	p = amcc_put_byte(mtod(m, u_int8_t*), 0);
	p = amcc_put_byte(p, 0);
	p = amcc_put_byte(p, SEND_REGISTER);
	p = amcc_put_word(p, applid);
	/* NumMessages scales with the channel count; the "+" variant is
	 * a dead alternative kept under #if 0 */
#if 0
	p = amcc_put_word(p, 1024 + (nchan + 1));
#else
	p = amcc_put_word(p, 1024 * (nchan + 1));
#endif
	p = amcc_put_word(p, nchan);
	p = amcc_put_word(p, 8);	/* NumB3Blocks */
	p = amcc_put_word(p, 2048);	/* B3Size */

	IF_ENQUEUE(&sc->sc_txq, m);

	iavc_start_tx(sc);

	return 0;
}
/*
 * This function shall be called by drivers immediately after every DTIM.
 * Transmit all group addressed MSDUs buffered at the AP.
 */
void
ieee80211_notify_dtim(struct ieee80211com *ic)
{
	/* NB: group addressed MSDUs are buffered in ic_bss */
	struct ieee80211_node *ni = ic->ic_bss;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_frame *wh;
	struct mbuf *m;

	KASSERT(ic->ic_opmode == IEEE80211_M_HOSTAP);

	/* move every buffered frame onto the power-save output queue */
	for (;;) {
		IF_DEQUEUE(&ni->ni_savedq, m);
		if (m == NULL)
			break;
		if (!IF_IS_EMPTY(&ni->ni_savedq)) {
			/* more queued frames, set the more data bit */
			wh = mtod(m, struct ieee80211_frame *);
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		}
		/* NOTE(review): no IF_QFULL check on ic_pwrsaveq here -
		 * confirm it is sized for a full savedq worth of frames */
		IF_ENQUEUE(&ic->ic_pwrsaveq, m);
		(*ifp->if_start)(ifp);
	}
/*---------------------------------------------------------------------------*
 *	this routine is called from the HSCX interrupt handler
 *	when a new frame (mbuf) has been received and is to be put on
 *	the rx queue.
 *---------------------------------------------------------------------------*/
static void
rbch_rx_data_rdy(void *softc)
{
	struct rbch_softc *sc = softc;

	/* raw HDLC mode: queue the completed frame for the reader */
	if(sc->sc_bprot == BPROT_RHDLC)
	{
		register struct mbuf *m;

		if((m = *sc->sc_ilt->rx_mbuf) == NULL)
			return;

		m->m_pkthdr.len = m->m_len;

		if(IF_QFULL(&sc->sc_hdlcq))
		{
			NDBGL4(L4_RBCHDBG, "(minor=%d) hdlc rx queue full!", sc->sc_unit);
			m_freem(m);
		}
		else
		{
			IF_ENQUEUE(&sc->sc_hdlcq, m);
		}
	}

	/* wake a reader blocked in the rbch read routine, if any */
	if(sc->sc_devstate & ST_RDWAITDATA)
	{
		NDBGL4(L4_RBCHDBG, "(minor=%d) wakeup", sc->sc_unit);
		sc->sc_devstate &= ~ST_RDWAITDATA;
		wakeup((void *) &sc->sc_ilt->rx_queue);
	}
	else
	{
		NDBGL4(L4_RBCHDBG, "(minor=%d) NO wakeup", sc->sc_unit);
	}
	/* notify select()/poll() waiters */
	selnotify(&sc->selp, 0, 0);
}
/*---------------------------------------------------------------------------* * i4bputqueue - put message into queue to userland *---------------------------------------------------------------------------*/ void i4bputqueue(struct mbuf *m) { int x; if(!openflag) { i4b_Dfreembuf(m); return; } x = splnet(); if(IF_QFULL(&i4b_rdqueue)) { struct mbuf *m1; IF_DEQUEUE(&i4b_rdqueue, m1); i4b_Dfreembuf(m1); NDBGL4(L4_ERR, "ERROR, queue full, removing entry!"); } IF_ENQUEUE(&i4b_rdqueue, m); splx(x); if(readflag) { readflag = 0; wakeup((void *) &i4b_rdqueue); } if(selflag) { selflag = 0; selnotify(&select_rd_info, 0, 0); } }
/*---------------------------------------------------------------------------*
 *	i4bputqueue - put message into queue to userland
 *---------------------------------------------------------------------------*/
void
i4bputqueue(struct mbuf *m)
{
	int x;

	/* nobody has the read device open: discard the message */
	if(!openflag)
	{
		i4b_Dfreembuf(m);
		return;
	}

	x = splimp();

	/* queue full: make room by discarding the oldest entry */
	if(IF_QFULL(&i4b_rdqueue))
	{
		struct mbuf *m1;
		IF_DEQUEUE(&i4b_rdqueue, m1);
		i4b_Dfreembuf(m1);
		DBGL4(L4_ERR, "i4bputqueue", ("ERROR, queue full, removing entry!\n"));
	}

	IF_ENQUEUE(&i4b_rdqueue, m);

	splx(x);

	/* wake a blocked reader */
	if(readflag)
	{
		readflag = 0;
		wakeup((caddr_t) &i4b_rdqueue);
	}

	/* notify select()ors */
	if(selflag)
	{
		selflag = 0;
		selwakeup(&select_rd_info);
	}
}
/*
 * ng_bt3c_rcvdata - netgraph receive-data method: queue an outgoing
 * HCI mbuf on the device output queue (dropping on overflow) and
 * schedule bt3c_send() in the node's context to push it out.
 */
static int
ng_bt3c_rcvdata(hook_p hook, item_p item)
{
	bt3c_softc_p	 sc = (bt3c_softc_p)NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	struct mbuf	*m = NULL;
	int		 error = 0;

	if (sc == NULL) {
		error = EHOSTDOWN;
		goto out;
	}

	if (hook != sc->hook) {
		error = EINVAL;
		goto out;
	}

	NGI_GET_M(item, m);

	if (IF_QFULL(&sc->outq)) {
		NG_BT3C_ERR(sc->dev,
"Outgoing queue is full. Dropping mbuf, len=%d\n", m->m_pkthdr.len);

		IF_DROP(&sc->outq);
		NG_BT3C_STAT_OERROR(sc->stat);

		NG_FREE_M(m);
	} else
		IF_ENQUEUE(&sc->outq, m);

	/* run bt3c_send in the node context (new send) */
	error = ng_send_fn(sc->node, NULL, bt3c_send, NULL, 0 /* new send */);
out:
	NG_FREE_ITEM(item);

	return (error);
} /* ng_bt3c_rcvdata */
/*---------------------------------------------------------------------------*
 *	write to rbch device
 *
 *	Copies up to BCH_MAX_DATALEN bytes from userland into one mbuf
 *	and hands it to the B-channel transmit queue.  In blocking mode
 *	the caller sleeps until the channel is connected and the queue
 *	has room; in non-blocking mode EWOULDBLOCK is returned instead.
 *---------------------------------------------------------------------------*/
PDEVSTATIC int
isdnbchanwrite(dev_t dev, struct uio * uio, int ioflag)
{
	struct mbuf *m;
	int error = 0;
	int unit = minor(dev);
	struct rbch_softc *sc = &rbch_softc[unit];
	int s;

	NDBGL4(L4_RBCHDBG, "unit %d, write", unit);

	s = splnet();

	if(!(sc->sc_devstate & ST_ISOPEN))
	{
		NDBGL4(L4_RBCHDBG, "unit %d, write while not open", unit);
		splx(s);
		return(EIO);
	}

	if((sc->sc_devstate & ST_NOBLOCK))
	{
		/* non-blocking: fail immediately instead of sleeping */
		if(!(sc->sc_devstate & ST_CONNECTED)) {
			splx(s);
			return(EWOULDBLOCK);
		}

		if(IF_QFULL(sc->sc_ilt->tx_queue) && (sc->sc_devstate & ST_ISOPEN)) {
			splx(s);
			return(EWOULDBLOCK);
		}
	}
	else
	{
		/* blocking: wait until the channel is connected */
		while(!(sc->sc_devstate & ST_CONNECTED))
		{
			NDBGL4(L4_RBCHDBG, "unit %d, write wait init", unit);

			error = tsleep((void *) &rbch_softc[unit],
				       TTIPRI | PCATCH, "wrrbch", 0 );
			if(error == ERESTART) {
				splx(s);
				return (ERESTART);
			}
			else if(error == EINTR)
			{
				splx(s);
				NDBGL4(L4_RBCHDBG, "unit %d, EINTR during wait init", unit);
				return(EINTR);
			}
			else if(error)
			{
				splx(s);
				NDBGL4(L4_RBCHDBG, "unit %d, error %d tsleep init", unit, error);
				return(error);
			}
			tsleep((void *) &rbch_softc[unit], TTIPRI | PCATCH, "xrbch", (hz*1));
		}

		/* wait for room in the transmit queue */
		while(IF_QFULL(sc->sc_ilt->tx_queue) && (sc->sc_devstate & ST_ISOPEN))
		{
			sc->sc_devstate |= ST_WRWAITEMPTY;

			NDBGL4(L4_RBCHDBG, "unit %d, write queue full", unit);

			if ((error = tsleep((void *) &sc->sc_ilt->tx_queue,
					    TTIPRI | PCATCH, "wrbch", 0)) != 0) {
				sc->sc_devstate &= ~ST_WRWAITEMPTY;
				if(error == ERESTART)
				{
					splx(s);
					return(ERESTART);
				}
				else if(error == EINTR)
				{
					splx(s);
					NDBGL4(L4_RBCHDBG, "unit %d, EINTR during wait write", unit);
					return(error);
				}
				else if(error)
				{
					splx(s);
					NDBGL4(L4_RBCHDBG, "unit %d, error %d tsleep write", unit, error);
					return(error);
				}
				else if (!(sc->sc_devstate & ST_CONNECTED)) {
					splx(s);
					return 0;
				}
			}
		}
	}

	if(!(sc->sc_devstate & ST_ISOPEN))
	{
		NDBGL4(L4_RBCHDBG, "unit %d, not open anymore", unit);
		splx(s);
		return(EIO);
	}

	if((m = i4b_Bgetmbuf(BCH_MAX_DATALEN)) != NULL)
	{
		m->m_len = min(BCH_MAX_DATALEN, uio->uio_resid);

		NDBGL4(L4_RBCHDBG, "unit %d, write %d bytes", unit, m->m_len);

		error = uiomove(m->m_data, m->m_len, uio);

		if(error)
		{
			/*
			 * BUG FIX: the original enqueued the mbuf even when
			 * uiomove() failed, transmitting a frame containing
			 * partially copied data.  Free it instead.
			 */
			m_freem(m);
		}
		else if(IF_QFULL(sc->sc_ilt->tx_queue))
		{
			m_freem(m);
		}
		else
		{
			IF_ENQUEUE(sc->sc_ilt->tx_queue, m);
		}

		(*sc->sc_ilt->bchannel_driver->bch_tx_start)(sc->sc_ilt->l1token,
							     sc->sc_ilt->channel);
	}

	splx(s);

	return(error);
}
/*
 * Queue a packet.  Start transmission if not active.
 * Packet is placed in Information field of PPP frame.
 * Called at splnet as the if->if_output handler.
 * Called at splnet from pppwrite().
 *
 * Returns 0 on success, ENETDOWN/EAFNOSUPPORT/ENOBUFS on failure; the
 * mbuf is always consumed (freed on every error path via 'bad').
 */
static int
pppoutput_serialized(struct ifnet *ifp, struct ifaltq_subque *ifsq,
		     struct mbuf *m0, struct sockaddr *dst, struct rtentry *rtp)
{
	struct ppp_softc *sc = &ppp_softc[ifp->if_dunit];
	int protocol, address, control;
	u_char *cp;
	int error;
#ifdef INET
	struct ip *ip;
#endif
	struct ifqueue *ifq;
	enum NPmode mode;
	int len;
	struct mbuf *m;
	struct altq_pktattr pktattr;

	if (sc->sc_devp == NULL || (ifp->if_flags & IFF_RUNNING) == 0
	    || ((ifp->if_flags & IFF_UP) == 0 && dst->sa_family != AF_UNSPEC)) {
		error = ENETDOWN;	/* sort of */
		goto bad;
	}

	ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr);

	/*
	 * Compute PPP header.
	 */
	m0->m_flags &= ~M_HIGHPRI;
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		address = PPP_ALLSTATIONS;
		control = PPP_UI;
		protocol = PPP_IP;
		mode = sc->sc_npmode[NP_IP];

		/*
		 * If this packet has the "low delay" bit set in the IP header,
		 * put it on the fastq instead.
		 */
		ip = mtod(m0, struct ip *);
		if (ip->ip_tos & IPTOS_LOWDELAY)
			m0->m_flags |= M_HIGHPRI;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		/*
		 * This is pretty bogus.. We dont have an ipxcp module in pppd
		 * yet to configure the link parameters.  Sigh. I guess a
		 * manual ifconfig would do....  -Peter
		 */
		address = PPP_ALLSTATIONS;
		control = PPP_UI;
		protocol = PPP_IPX;
		mode = NPMODE_PASS;
		break;
#endif
	case AF_UNSPEC:
		/* raw frame: the PPP header fields come from sa_data */
		address = PPP_ADDRESS(dst->sa_data);
		control = PPP_CONTROL(dst->sa_data);
		protocol = PPP_PROTOCOL(dst->sa_data);
		mode = NPMODE_PASS;
		break;
	default:
		kprintf("%s: af%d not supported\n", ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Drop this packet, or return an error, if necessary.
	 */
	if (mode == NPMODE_ERROR) {
		error = ENETDOWN;
		goto bad;
	}
	if (mode == NPMODE_DROP) {
		error = 0;
		goto bad;
	}

	/*
	 * Add PPP header.  If no space in first mbuf, allocate another.
	 * (This assumes M_LEADINGSPACE is always 0 for a cluster mbuf.)
	 */
	if (M_LEADINGSPACE(m0) < PPP_HDRLEN) {
		m0 = m_prepend(m0, PPP_HDRLEN, MB_DONTWAIT);
		if (m0 == NULL) {
			error = ENOBUFS;
			goto bad;
		}
		m0->m_len = 0;
	} else
		m0->m_data -= PPP_HDRLEN;

	cp = mtod(m0, u_char *);
	*cp++ = address;
	*cp++ = control;
	*cp++ = protocol >> 8;
	*cp++ = protocol & 0xff;
	m0->m_len += PPP_HDRLEN;

	/* total frame length, for stats and the BPF filters below */
	len = 0;
	for (m = m0; m != NULL; m = m->m_next)
		len += m->m_len;

	if (sc->sc_flags & SC_LOG_OUTPKT) {
		kprintf("%s output: ", ifp->if_xname);
		pppdumpm(m0);
	}

	if ((protocol & 0x8000) == 0) {
#ifdef PPP_FILTER
		/*
		 * Apply the pass and active filters to the packet,
		 * but only if it is a data packet.
		 */
		*mtod(m0, u_char *) = 1;	/* indicates outbound */
		if (sc->sc_pass_filt.bf_insns != NULL
		    && bpf_filter(sc->sc_pass_filt.bf_insns, (u_char *) m0,
				  len, 0) == 0) {
			error = 0;	/* drop this packet */
			goto bad;
		}

		/*
		 * Update the time we sent the most recent packet.
		 */
		if (sc->sc_active_filt.bf_insns == NULL
		    || bpf_filter(sc->sc_active_filt.bf_insns, (u_char *) m0, len, 0))
			sc->sc_last_sent = time_uptime;

		*mtod(m0, u_char *) = address;
#else
		/*
		 * Update the time we sent the most recent data packet.
		 */
		sc->sc_last_sent = time_uptime;
#endif /* PPP_FILTER */
	}

	BPF_MTAP(ifp, m0);

	/*
	 * Put the packet on the appropriate queue.
	 */
	crit_enter();
	if (mode == NPMODE_QUEUE) {
		/* XXX we should limit the number of packets on this queue */
		*sc->sc_npqtail = m0;
		m0->m_nextpkt = NULL;
		sc->sc_npqtail = &m0->m_nextpkt;
	} else {
		/* fastq and if_snd are emptied at spl[soft]net now */
		if ((m0->m_flags & M_HIGHPRI) && !ifq_is_enabled(&sc->sc_if.if_snd)) {
			ifq = &sc->sc_fastq;
			if (IF_QFULL(ifq) && dst->sa_family != AF_UNSPEC) {
				IF_DROP(ifq);
				m_freem(m0);
				error = ENOBUFS;
			} else {
				IF_ENQUEUE(ifq, m0);
				error = 0;
			}
		} else {
			ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);
			error = ifsq_enqueue(ifsq, m0, &pktattr);
		}
		if (error) {
			crit_exit();
			IFNET_STAT_INC(&sc->sc_if, oerrors, 1);
			sc->sc_stats.ppp_oerrors++;
			return (error);
		}
		(*sc->sc_start)(sc);
	}
	getmicrotime(&ifp->if_lastchange);
	IFNET_STAT_INC(ifp, opackets, 1);
	IFNET_STAT_INC(ifp, obytes, len);

	crit_exit();
	return (0);

bad:
	m_freem(m0);
	return (error);
}
/*
 * lostart - deferred start routine for the loopback interface (used
 * when ALTQ is active).  Drains if_snd and re-injects each packet
 * into the protocol input path selected by the address-family word
 * that looutput() prepended to the packet.
 */
static void
lostart(struct ifnet *ifp)
{
	for (;;) {
		pktqueue_t *pktq = NULL;
		struct ifqueue *ifq = NULL;
		struct mbuf *m;
		size_t pktlen;
		uint32_t af;
		int s, isr = 0;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

		/* recover the address family stashed ahead of the packet */
		af = *(mtod(m, uint32_t *));
		m_adj(m, sizeof(uint32_t));

		switch (af) {
#ifdef INET
		case AF_INET:
			pktq = ip_pktq;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			m->m_flags |= M_LOOP;
			pktq = ip6_pktq;
			break;
#endif
#ifdef IPX
		case AF_IPX:
			ifq = &ipxintrq;
			isr = NETISR_IPX;
			break;
#endif
#ifdef NETATALK
		case AF_APPLETALK:
			ifq = &atintrq2;
			isr = NETISR_ATALK;
			break;
#endif
		default:
			printf("%s: can't handle af%d\n", ifp->if_xname, af);
			m_freem(m);
			return;
		}
		pktlen = m->m_pkthdr.len;

		s = splnet();
		if (__predict_true(pktq)) {
			/* modern pktqueue path (IP/IPv6) */
			if (__predict_false(pktq_enqueue(pktq, m, 0))) {
				m_freem(m);
				splx(s);
				return;
			}
			ifp->if_ipackets++;
			ifp->if_ibytes += pktlen;
			splx(s);
			continue;
		}
		/* legacy netisr queue path */
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			splx(s);
			m_freem(m);
			return;
		}
		IF_ENQUEUE(ifq, m);
		schednetisr(isr);
		ifp->if_ipackets++;
		ifp->if_ibytes += pktlen;
		splx(s);
	}
}
/*
 * looutput - output routine for the loopback interface.  Instead of
 * transmitting, re-inject the packet into the appropriate protocol
 * input path (pktqueue for IP/IPv6; legacy netisr queues otherwise,
 * including the MPLS path when the route carries an MPLS tag).
 */
int
looutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct rtentry *rt)
{
	pktqueue_t *pktq = NULL;
	struct ifqueue *ifq = NULL;
	int s, isr = -1;
	int csum_flags;
	size_t pktlen;

	MCLAIM(m, ifp->if_mowner);
	KASSERT(KERNEL_LOCKED_P());

	if ((m->m_flags & M_PKTHDR) == 0)
		panic("looutput: no header mbuf");
	if (ifp->if_flags & IFF_LOOPBACK)
		bpf_mtap_af(ifp, dst->sa_family, m);
	m->m_pkthdr.rcvif = ifp;

	if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
		m_freem(m);
		return (rt->rt_flags & RTF_BLACKHOLE ? 0 :
			rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
	}

	pktlen = m->m_pkthdr.len;

	ifp->if_opackets++;
	ifp->if_obytes += pktlen;

#ifdef ALTQ
	/*
	 * ALTQ on the loopback interface is just for debugging.  It's
	 * used only for loopback interfaces, not for a simplex interface.
	 */
	if ((ALTQ_IS_ENABLED(&ifp->if_snd) || TBR_IS_ENABLED(&ifp->if_snd)) &&
	    ifp->if_start == lostart) {
		struct altq_pktattr pktattr;
		int error;

		/*
		 * If the queueing discipline needs packet classification,
		 * do it before prepending the link headers.
		 */
		IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

		/* stash the address family ahead of the packet for lostart */
		M_PREPEND(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*(mtod(m, uint32_t *)) = dst->sa_family;

		s = splnet();
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
		(*ifp->if_start)(ifp);
		splx(s);
		return (error);
	}
#endif /* ALTQ */

	m_tag_delete_nonpersistent(m);

#ifdef MPLS
	if (rt != NULL && rt_gettag(rt) != NULL &&
	    rt_gettag(rt)->sa_family == AF_MPLS &&
	    (m->m_flags & (M_MCAST | M_BCAST)) == 0) {
		union mpls_shim msh;
		msh.s_addr = MPLS_GETSADDR(rt);
		if (msh.shim.label != MPLS_LABEL_IMPLNULL) {
			ifq = &mplsintrq;
			isr = NETISR_MPLS;
		}
	}
	if (isr != NETISR_MPLS)
#endif
	switch (dst->sa_family) {

#ifdef INET
	case AF_INET:
		/* finish any deferred checksum before loopback delivery */
		csum_flags = m->m_pkthdr.csum_flags;
		KASSERT((csum_flags & ~(M_CSUM_IPv4|M_CSUM_UDPv4)) == 0);
		if (csum_flags != 0 && IN_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
			ip_undefer_csum(m, 0, csum_flags);
		}
		m->m_pkthdr.csum_flags = 0;
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		csum_flags = m->m_pkthdr.csum_flags;
		KASSERT((csum_flags & ~M_CSUM_UDPv6) == 0);
		if (csum_flags != 0 &&
		    IN6_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
			ip6_undefer_csum(m, 0, csum_flags);
		}
		m->m_pkthdr.csum_flags = 0;
		m->m_flags |= M_LOOP;
		pktq = ip6_pktq;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		ifq = &ipxintrq;
		isr = NETISR_IPX;
		break;
#endif
#ifdef NETATALK
	case AF_APPLETALK:
		ifq = &atintrq2;
		isr = NETISR_ATALK;
		break;
#endif
	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		m_freem(m);
		return (EAFNOSUPPORT);
	}

	s = splnet();
	if (__predict_true(pktq)) {
		int error = 0;

		if (__predict_true(pktq_enqueue(pktq, m, 0))) {
			ifp->if_ipackets++;
			ifp->if_ibytes += pktlen;
		} else {
			m_freem(m);
			error = ENOBUFS;
		}
		splx(s);
		return error;
	}
	/* legacy netisr queue path */
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		m_freem(m);
		splx(s);
		return (ENOBUFS);
	}
	IF_ENQUEUE(ifq, m);
	schednetisr(isr);
	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;
	splx(s);
	return (0);
}
/*
 * icintr() - i2c bus event handler for the 'ic' network interface.
 * Assembles incoming bytes into ic_ifbuf and, on STOP, hands the
 * completed IP packet to the IP input queue.
 */
static void
icintr (device_t dev, int event, char *ptr)
{
	struct ic_softc *sc = (struct ic_softc *)device_get_softc(dev);
	int unit = device_get_unit(dev);
	int s, len;
	struct mbuf *top;

	s = splhigh();

	switch (event) {

	case INTR_GENERAL:
	case INTR_START:
		/* new transfer: reset the assembly buffer */
		sc->ic_cp = sc->ic_ifbuf;
		sc->ic_xfercnt = 0;
		break;

	case INTR_STOP:

		/* if any error occured during transfert,
		 * drop the packet */
		if (sc->ic_iferrs)
			goto err;

		if ((len = sc->ic_xfercnt) == 0)
			break;			/* ignore */

		if (len <= ICHDRLEN)
			goto err;

		if (IF_QFULL(&ipintrq)) {
			IF_DROP(&ipintrq);
			break;
		}

		len -= ICHDRLEN;
		sc->ic_if.if_ipackets ++;
		sc->ic_if.if_ibytes += len;

		if (sc->ic_if.if_bpf)
			bpf_tap(&sc->ic_if, sc->ic_ifbuf, len + ICHDRLEN);

		/* copy the payload (past the pseudo header) into mbufs;
		 * a NULL result is silently dropped */
		top = m_devget(sc->ic_ifbuf + ICHDRLEN, len, 0, &sc->ic_if, 0);

		if (top) {
			IF_ENQUEUE(&ipintrq, top);
			schednetisr(NETISR_IP);
		}
		break;

	err:
		printf("ic%d: errors (%d)!\n", unit, sc->ic_iferrs);

		sc->ic_iferrs = 0;	/* reset error count */
		sc->ic_if.if_ierrors ++;

		break;

	case INTR_RECEIVE:
		/* byte-at-a-time reception; count overruns as errors */
		if (sc->ic_xfercnt >= sc->ic_if.if_mtu+ICHDRLEN) {
			sc->ic_iferrs ++;
		} else {
			*sc->ic_cp++ = *ptr;
			sc->ic_xfercnt ++;
		}
		break;

	case INTR_NOACK:		/* xfer terminated by master */
		break;

	case INTR_TRANSMIT:
		*ptr = 0xff;		/* XXX */
		break;

	case INTR_ERROR:
		sc->ic_iferrs ++;
		break;

	default:
		panic("%s: unknown event (%d)!", __FUNCTION__, event);
	}

	splx(s);
	return;
}
/*
 * looutput - output routine for the loopback interface.  Re-injects
 * the packet into the matching protocol input queue and schedules
 * the corresponding netisr instead of transmitting it.
 */
int
looutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct rtentry *rt)
{
	int s, isr;
	struct ifqueue *ifq = NULL;

	MCLAIM(m, ifp->if_mowner);
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("looutput: no header mbuf");
#if NBPFILTER > 0
	if (ifp->if_bpf && (ifp->if_flags & IFF_LOOPBACK))
		bpf_mtap_af(ifp->if_bpf, dst->sa_family, m);
#endif
	m->m_pkthdr.rcvif = ifp;

	if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
		m_freem(m);
		return (rt->rt_flags & RTF_BLACKHOLE ? 0 :
			rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
	}

	ifp->if_opackets++;
	ifp->if_obytes += m->m_pkthdr.len;

#ifdef ALTQ
	/*
	 * ALTQ on the loopback interface is just for debugging.  It's
	 * used only for loopback interfaces, not for a simplex interface.
	 */
	if ((ALTQ_IS_ENABLED(&ifp->if_snd) || TBR_IS_ENABLED(&ifp->if_snd)) &&
	    ifp->if_start == lostart) {
		struct altq_pktattr pktattr;
		int error;

		/*
		 * If the queueing discipline needs packet classification,
		 * do it before prepending the link headers.
		 */
		IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

		/* stash the address family ahead of the packet for lostart */
		M_PREPEND(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*(mtod(m, uint32_t *)) = dst->sa_family;

		s = splnet();
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
		(*ifp->if_start)(ifp);
		splx(s);
		return (error);
	}
#endif /* ALTQ */

	m_tag_delete_nonpersistent(m);

	switch (dst->sa_family) {

#ifdef INET
	case AF_INET:
		ifq = &ipintrq;
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		m->m_flags |= M_LOOP;
		ifq = &ip6intrq;
		isr = NETISR_IPV6;
		break;
#endif
#ifdef ISO
	case AF_ISO:
		ifq = &clnlintrq;
		isr = NETISR_ISO;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		ifq = &ipxintrq;
		isr = NETISR_IPX;
		break;
#endif
#ifdef NETATALK
	case AF_APPLETALK:
		ifq = &atintrq2;
		isr = NETISR_ATALK;
		break;
#endif
	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		m_freem(m);
		return (EAFNOSUPPORT);
	}

	s = splnet();
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		m_freem(m);
		splx(s);
		return (ENOBUFS);
	}
	IF_ENQUEUE(ifq, m);
	schednetisr(isr);
	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;
	splx(s);
	return (0);
}
/*
 * lostart - deferred start routine for the loopback interface (ALTQ
 * path).  Drains if_snd and feeds each packet to the protocol input
 * queue chosen by the address-family word prepended in looutput().
 */
static void
lostart(struct ifnet *ifp)
{
	struct ifqueue *ifq;
	struct mbuf *m;
	uint32_t af;
	int s, isr;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

		/* recover the address family stashed ahead of the packet */
		af = *(mtod(m, uint32_t *));
		m_adj(m, sizeof(uint32_t));

		switch (af) {
#ifdef INET
		case AF_INET:
			ifq = &ipintrq;
			isr = NETISR_IP;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			m->m_flags |= M_LOOP;
			ifq = &ip6intrq;
			isr = NETISR_IPV6;
			break;
#endif
#ifdef IPX
		case AF_IPX:
			ifq = &ipxintrq;
			isr = NETISR_IPX;
			break;
#endif
#ifdef ISO
		case AF_ISO:
			ifq = &clnlintrq;
			isr = NETISR_ISO;
			break;
#endif
#ifdef NETATALK
		case AF_APPLETALK:
			ifq = &atintrq2;
			isr = NETISR_ATALK;
			break;
#endif
		default:
			printf("%s: can't handle af%d\n", ifp->if_xname, af);
			m_freem(m);
			return;
		}

		s = splnet();
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			splx(s);
			m_freem(m);
			return;
		}
		IF_ENQUEUE(ifq, m);
		schednetisr(isr);
		ifp->if_ipackets++;
		ifp->if_ibytes += m->m_pkthdr.len;
		splx(s);
	}
}