static int
tap_dev_close(struct tap_softc *sc)
{
	struct ifnet *ifp;
	int s;

	s = splnet();
	/* Let tap_start handle packets again */
	ifp = &sc->sc_ec.ec_if;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Purge output queue */
	if (!(IFQ_IS_EMPTY(&ifp->if_snd))) {
		struct mbuf *m;

		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			ifp->if_opackets++;
			bpf_mtap(ifp, m);
			m_freem(m);
		}
	}
	splx(s);

	if (sc->sc_sih != NULL) {
		softint_disestablish(sc->sc_sih);
		sc->sc_sih = NULL;
	}
	sc->sc_flags &= ~(TAP_INUSE | TAP_ASYNCIO);

	return (0);
}
/*
 * priq_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
priq_dequeue(struct ifaltq *ifq, int op)
{
	struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
	struct priq_class *cl;
	struct mbuf *m;
	int pri;

	if (IFQ_IS_EMPTY(ifq))
		/* no packet in the queue */
		return (NULL);

	for (pri = pif->pif_maxpri; pri >= 0; pri--) {
		if ((cl = pif->pif_classes[pri]) != NULL &&
		    !qempty(cl->cl_q)) {
			if (op == ALTDQ_POLL)
				return (priq_pollq(cl));

			m = priq_getq(cl);
			if (m != NULL) {
				IFQ_DEC_LEN(ifq);
				if (qempty(cl->cl_q))
					cl->cl_period++;
				PKTCNTR_ADD(&cl->cl_xmitcnt, m_pktlen(m));
			}
			return (m);
		}
	}
	return (NULL);
}
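/*
 * For context only: a simplified, illustrative paraphrase (not taken from
 * any of the drivers in this listing, and not the verbatim <altq/if_altq.h>
 * macros) of how the driver-facing IFQ_POLL/IFQ_DEQUEUE operations end up
 * calling a registered dequeue function such as priq_dequeue() when ALTQ is
 * enabled.  ALTQ_IS_ENABLED() and the ifq_head member come from the
 * if_altq/ifqueue headers; the classic non-ALTQ paths are abbreviated.
 */
static struct mbuf *
ifq_poll_sketch(struct ifaltq *ifq)
{
	if (ALTQ_IS_ENABLED(ifq))
		/* Peek only: the packet stays on the queue. */
		return ((*ifq->altq_dequeue)(ifq, ALTDQ_POLL));
	return (ifq->ifq_head);		/* classic FIFO peek */
}

static struct mbuf *
ifq_dequeue_sketch(struct ifaltq *ifq)
{
	struct mbuf *m;

	if (ALTQ_IS_ENABLED(ifq))
		/*
		 * Must hand back the same mbuf a preceding ALTDQ_POLL
		 * returned, per the contract described above.
		 */
		return ((*ifq->altq_dequeue)(ifq, ALTDQ_REMOVE));
	IF_DEQUEUE(ifq, m);		/* classic FIFO dequeue */
	return (m);
}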
void
cdcef_txeof(struct usbf_xfer *xfer, void *priv, usbf_status err)
{
	struct cdcef_softc *sc = priv;
	struct ifnet *ifp = GET_IFP(sc);
	int s;

	s = splnet();
#if 0
	printf("cdcef_txeof: xfer=%p, priv=%p, %s\n", xfer, priv,
	    usbf_errstr(err));
#endif
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (sc->sc_xmit_mbuf != NULL) {
		m_freem(sc->sc_xmit_mbuf);
		sc->sc_xmit_mbuf = NULL;
	}

	if (err)
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		timeout_add(&sc->start_to, 1);	/* XXX */

	splx(s);
}
void
kue_watchdog(struct ifnet *ifp)
{
	struct kue_softc *sc = ifp->if_softc;
	struct kue_chain *c;
	usbd_status stat;
	int s;

	DPRINTFN(5,("%s: %s: enter\n", sc->kue_dev.dv_xname, __func__));

	if (sc->kue_dying)
		return;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->kue_dev.dv_xname);

	s = splusb();
	c = &sc->kue_cdata.kue_tx_chain[0];
	usbd_get_xfer_status(c->kue_xfer, NULL, NULL, NULL, &stat);
	kue_txeof(c->kue_xfer, c, stat);

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		kue_start(ifp);
	splx(s);
}
/*
 * This is the function where we SEND packets.
 *
 * There is no 'receive' equivalent.  A typical driver will get
 * interrupts from the hardware, and from there will inject new packets
 * into the network stack.
 *
 * Once handled, a packet must be freed.  A real driver might not be able
 * to fit all the pending packets into the hardware, and is allowed to
 * return before having sent all the packets.  It should then use the
 * if_flags flag IFF_OACTIVE to notify the upper layer.
 *
 * There are also other flags one should check, such as IFF_PAUSE.
 *
 * It is our duty to make packets available to BPF listeners.
 *
 * You should be aware that this function is called by the Ethernet layer
 * at splnet().
 *
 * When the device is opened, we have to pass the packet(s) to the
 * userland.  For that we stay in OACTIVE mode while the userland gets
 * the packets, and we send a signal to the processes waiting to read.
 *
 * wakeup(sc) is the counterpart to the tsleep call in
 * tap_dev_read, while selnotify() is used for kevent(2) and
 * poll(2) (which includes select(2)) listeners.
 */
static void
tap_start(struct ifnet *ifp)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct mbuf *m0;

	if ((sc->sc_flags & TAP_INUSE) == 0) {
		/* Simply drop packets */
		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				return;

			ifp->if_opackets++;
			bpf_mtap(ifp, m0);
			m_freem(m0);
		}
	} else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags |= IFF_OACTIVE;
		wakeup(sc);
		selnotify(&sc->sc_rsel, 0, 1);
		if (sc->sc_flags & TAP_ASYNCIO)
			softint_schedule(sc->sc_sih);
	}
}
int
rtk_intr(void *arg)
{
	struct rtk_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int handled;

	sc = arg;
	ifp = &sc->ethercom.ec_if;

	if (!device_has_power(sc->sc_dev))
		return 0;

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, 0x0000);

	handled = 0;
	for (;;) {
		status = CSR_READ_2(sc, RTK_ISR);

		if (status == 0xffff)
			break; /* Card is gone... */

		if (status)
			CSR_WRITE_2(sc, RTK_ISR, status);

		if ((status & RTK_INTRS) == 0)
			break;

		handled = 1;

		if (status & RTK_ISR_RX_OK)
			rtk_rxeof(sc);

		if (status & RTK_ISR_RX_ERR)
			rtk_rxeof(sc);

		if (status & (RTK_ISR_TX_OK|RTK_ISR_TX_ERR))
			rtk_txeof(sc);

		if (status & RTK_ISR_SYSTEM_ERR) {
			rtk_reset(sc);
			rtk_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		rtk_start(ifp);

	rnd_add_uint32(&sc->rnd_source, status);

	return handled;
}
int
ste_intr(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* See if this is really our interrupt. */
	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
		return claimed;

	for (;;) {
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (!(status & STE_INTRS))
			break;

		claimed = 1;

		if (status & STE_ISR_RX_DMADONE) {
			ste_rxeoc(sc);
			ste_rxeof(sc);
		}

		if (status & STE_ISR_TX_DMADONE)
			ste_txeof(sc);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW) {
			timeout_del(&sc->sc_stats_tmo);
			ste_stats_update(sc);
		}

		if (status & STE_ISR_LINKEVENT)
			mii_pollstat(&sc->sc_mii);

		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		ste_start(ifp);

	return claimed;
}
Static void
usbintr()
{
	struct ether_header *eh;
	struct mbuf *m;
	struct usb_qdat *q;
	struct ifnet *ifp;
	int s;

	s = splimp();

	/* Check the RX queue */
	while (1) {
		IF_DEQUEUE(&usbq_rx, m);
		if (m == NULL)
			break;
		eh = mtod(m, struct ether_header *);
		q = (struct usb_qdat *)m->m_pkthdr.rcvif;
		ifp = q->ifp;
		m->m_pkthdr.rcvif = ifp;
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);

		/* Re-arm the receiver */
		(*q->if_rxstart)(ifp);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			(*ifp->if_start)(ifp);
	}

	/* Check the TX queue */
	while (1) {
		IF_DEQUEUE(&usbq_tx, m);
		if (m == NULL)
			break;
		ifp = m->m_pkthdr.rcvif;
		m_freem(m);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			(*ifp->if_start)(ifp);
	}

	splx(s);

	return;
}
int
smap_intr(void *arg)
{
	struct smap_softc *sc = arg;
	struct ifnet *ifp;
	u_int16_t cause, disable, r;

	cause = _reg_read_2(SPD_INTR_STATUS_REG16) &
	    _reg_read_2(SPD_INTR_ENABLE_REG16);

	disable = cause & (SPD_INTR_RXDNV | SPD_INTR_TXDNV);
	if (disable) {
		r = _reg_read_2(SPD_INTR_ENABLE_REG16);
		r &= ~disable;
		_reg_write_2(SPD_INTR_ENABLE_REG16, r);

		printf("%s: invalid descriptor. (%c%c)\n", DEVNAME,
		    disable & SPD_INTR_RXDNV ? 'R' : '_',
		    disable & SPD_INTR_TXDNV ? 'T' : '_');

		if (disable & SPD_INTR_RXDNV)
			smap_rxeof(arg);

		_reg_write_2(SPD_INTR_CLEAR_REG16, disable);
	}

	if (cause & SPD_INTR_TXEND) {
		_reg_write_2(SPD_INTR_CLEAR_REG16, SPD_INTR_TXEND);
		if (_reg_read_1(SMAP_RXFIFO_FRAME_REG8) > 0)
			cause |= SPD_INTR_RXEND;
		smap_txeof(arg);
	}

	if (cause & SPD_INTR_RXEND) {
		_reg_write_2(SPD_INTR_CLEAR_REG16, SPD_INTR_RXEND);
		smap_rxeof(arg);
		if (sc->tx_desc_cnt > 0 &&
		    sc->tx_desc_cnt > _reg_read_1(SMAP_TXFIFO_FRAME_REG8))
			smap_txeof(arg);
	}

	if (cause & SPD_INTR_EMAC3)
		emac3_intr(arg);

	/* if transmission is pending, start here */
	ifp = &sc->ethercom.ec_if;
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		smap_start(ifp);

#if NRND > 0
	rnd_add_uint32(&sc->rnd_source, cause | sc->tx_fifo_ptr << 16);
#endif

	return (1);
}
void
pdq_os_restart_transmitter(
	pdq_t *pdq)
{
	pdq_softc_t *sc = (pdq_softc_t *) pdq->pdq_os_ctx;
	sc->sc_if.if_flags &= ~IFF_OACTIVE;
	if (!IFQ_IS_EMPTY(&sc->sc_if.if_snd)) {
		sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;
		pdq_ifstart(&sc->sc_if);
	} else {
		sc->sc_if.if_timer = 0;
	}
}
int
an_intr(void *arg)
{
	struct an_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int i;
	u_int16_t status;

	if (!sc->sc_enabled || sc->sc_invalid ||
	    (sc->sc_dev.dv_flags & DVF_ACTIVE) == 0 ||
	    (ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	if ((ifp->if_flags & IFF_UP) == 0) {
		CSR_WRITE_2(sc, AN_INT_EN, 0);
		CSR_WRITE_2(sc, AN_EVENT_ACK, ~0);
		return 1;
	}

	/* maximum 10 loops per interrupt */
	for (i = 0; i < 10; i++) {
		if (!sc->sc_enabled || sc->sc_invalid)
			return 1;
		if (CSR_READ_2(sc, AN_SW0) != AN_MAGIC) {
			DPRINTF(("an_intr: magic number changed: %x\n",
			    CSR_READ_2(sc, AN_SW0)));
			sc->sc_invalid = 1;
			return 1;
		}
		status = CSR_READ_2(sc, AN_EVENT_STAT);
		CSR_WRITE_2(sc, AN_EVENT_ACK, status & ~(AN_INTRS));

		if ((status & AN_INTRS) == 0)
			break;

		if (status & AN_EV_RX)
			an_rxeof(sc);

		if (status & (AN_EV_TX | AN_EV_TX_EXC))
			an_txeof(sc, status);

		if (status & AN_EV_LINKSTAT)
			an_linkstat_intr(sc);

		if (ifq_is_oactive(&ifp->if_snd) == 0 &&
		    sc->sc_ic.ic_state == IEEE80211_S_RUN &&
		    !IFQ_IS_EMPTY(&ifp->if_snd))
			an_start(ifp);
	}

	return 1;
}
void
pdq_os_restart_transmitter(
	pdq_t *pdq)
{
	pdq_softc_t *sc = pdq->pdq_os_ctx;
	PDQ_IFNET(sc)->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if (IFQ_IS_EMPTY(&PDQ_IFNET(sc)->if_snd) == 0) {
		sc->timer = PDQ_OS_TX_TIMEOUT;
		if ((sc->sc_flags & PDQIF_DOWNCALL) == 0)
			pdq_ifstart_locked(PDQ_IFNET(sc));
	} else {
		sc->timer = 0;
	}
}
void
wi_usb_txeof_frm(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct wi_usb_chain *c = priv;
	struct wi_usb_softc *sc = c->wi_usb_sc;
	struct wi_softc *wsc = &sc->sc_wi;
	struct ifnet *ifp = &wsc->sc_ic.ic_if;
	int s;
	int err = 0;

	if (usbd_is_dying(sc->wi_usb_udev))
		return;

	s = splnet();

	DPRINTFN(10,("%s: %s: enter status=%d\n", sc->wi_usb_dev.dv_xname,
	    __func__, status));

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			splx(s);
			return;
		}
		printf("%s: usb error on tx: %s\n", sc->wi_usb_dev.dv_xname,
		    usbd_errstr(status));
		if (status == USBD_STALLED) {
			sc->wi_usb_refcnt++;
			usbd_clear_endpoint_stall_async(
			    sc->wi_usb_ep[WI_USB_ENDPT_TX]);
			if (--sc->wi_usb_refcnt < 0)
				usb_detach_wakeup(&sc->wi_usb_dev);
		}
		splx(s);
		return;
	}

	if (status)
		err = WI_EV_TX_EXC;

	wi_txeof(wsc, err);

	wi_usb_tx_unlock(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		wi_start_usb(ifp);

	splx(s);
}
Static void
url_txeof(usbd_xfer_handle xfer, usbd_private_handle priv,
    usbd_status status)
{
	struct url_chain *c = priv;
	struct url_softc *sc = c->url_sc;
	struct ifnet *ifp = GET_IFP(sc);
	int s;

	if (sc->sc_dying)
		return;

	s = splnet();

	DPRINTF(("%s: %s: enter\n", device_xname(sc->sc_dev), __func__));

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			splx(s);
			return;
		}
		ifp->if_oerrors++;
		printf("%s: usb error on tx: %s\n", device_xname(sc->sc_dev),
		    usbd_errstr(status));
		if (status == USBD_STALLED) {
			sc->sc_refcnt++;
			usbd_clear_endpoint_stall_async(sc->sc_pipe_tx);
			if (--sc->sc_refcnt < 0)
				usb_detach_wakeupold(sc->sc_dev);
		}
		splx(s);
		return;
	}

	ifp->if_opackets++;

	m_freem(c->url_mbuf);
	c->url_mbuf = NULL;

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		url_start(ifp);

	splx(s);
}
Static void
kue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv,
    usbd_status status)
{
	struct kue_chain *c = priv;
	struct kue_softc *sc = c->kue_sc;
	struct ifnet *ifp = GET_IFP(sc);
	int s;

	if (sc->kue_dying)
		return;

	s = splnet();

	DPRINTFN(10,("%s: %s: enter status=%d\n", USBDEVNAME(sc->kue_dev),
	    __func__, status));

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			splx(s);
			return;
		}
		ifp->if_oerrors++;
		printf("%s: usb error on tx: %s\n", USBDEVNAME(sc->kue_dev),
		    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->kue_ep[KUE_ENDPT_TX]);
		splx(s);
		return;
	}

	ifp->if_opackets++;

	m_freem(c->kue_mbuf);
	c->kue_mbuf = NULL;

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		kue_start(ifp);

	splx(s);
}
/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
void
ugl_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct ugl_chain *c = priv;
	struct ugl_softc *sc = c->ugl_sc;
	struct ifnet *ifp = GET_IFP(sc);
	int s;

	if (usbd_is_dying(sc->sc_udev))
		return;

	s = splnet();

	DPRINTFN(10,("%s: %s: enter status=%d\n", sc->sc_dev.dv_xname,
	    __func__, status));

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			splx(s);
			return;
		}
		ifp->if_oerrors++;
		printf("%s: usb error on tx: %s\n", sc->sc_dev.dv_xname,
		    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->sc_ep[UGL_ENDPT_TX]);
		splx(s);
		return;
	}

	ifp->if_opackets++;

	m_freem(c->ugl_mbuf);
	c->ugl_mbuf = NULL;

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ugl_start(ifp);

	splx(s);
}
Static void
url_tick_task(void *xsc)
{
	struct url_softc *sc = xsc;
	struct ifnet *ifp;
	struct mii_data *mii;
	int s;

	if (sc == NULL)
		return;

	DPRINTFN(0xff, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev),
	    __func__));

	if (sc->sc_dying)
		return;

	ifp = GET_IFP(sc);
	mii = GET_MII(sc);

	if (mii == NULL)
		return;

	s = splnet();

	mii_tick(mii);
	if (!sc->sc_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			DPRINTF(("%s: %s: got link\n",
			    USBDEVNAME(sc->sc_dev), __func__));
			sc->sc_link++;
			if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
				url_start(ifp);
		}
	}

	usb_callout(sc->sc_stat_ch, hz, url_tick, sc);

	splx(s);
}
void
tsec_start(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifp->if_flags & IFF_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	idx = sc->sc_tx_prod;
	while ((sc->sc_txdesc[idx].td_status & TSEC_TX_TO1) == 0) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (tsec_encap(sc, m, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Now we are committed to transmit the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
Static void
url_watchdog(struct ifnet *ifp)
{
	struct url_softc *sc = ifp->if_softc;
	struct url_chain *c;
	usbd_status stat;
	int s;

	DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__));

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", USBDEVNAME(sc->sc_dev));

	s = splusb();
	c = &sc->sc_cdata.url_tx_chain[0];
	usbd_get_xfer_status(c->url_xfer, NULL, NULL, NULL, &stat);
	url_txeof(c->url_xfer, c, stat);

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		url_start(ifp);
	splx(s);
}
static struct mbuf *
codel_dequeue(struct ifaltq *ifq, int op)
{
	struct codel_if *cif = (struct codel_if *)ifq->altq_disc;
	struct mbuf *m;

	IFQ_LOCK_ASSERT(ifq);

	if (IFQ_IS_EMPTY(ifq))
		return (NULL);

	if (op == ALTDQ_POLL)
		return (qhead(cif->cl_q));

	m = codel_getq(&cif->codel, cif->cl_q);
	if (m != NULL) {
		IFQ_DEC_LEN(ifq);
		PKTCNTR_ADD(&cif->cl_stats.cl_xmitcnt, m_pktlen(m));
		return (m);
	}

	return (NULL);
}
/*
 * Established by attachment driver at interrupt priority IPL_NET.
 */
int
imxenet_intr(void *arg)
{
	struct imxenet_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	u_int32_t status;

	/* Find out which interrupts are pending. */
	status = HREAD4(sc, ENET_EIR);

	/* Acknowledge the interrupts we are about to handle. */
	HWRITE4(sc, ENET_EIR, status);

	/*
	 * Wake up the blocking process to service command
	 * related interrupt(s).
	 */
	if (ISSET(status, ENET_EIR_MII)) {
		sc->intr_status |= status;
		wakeup(&sc->intr_status);
	}

	/*
	 * Handle incoming packets.
	 */
	if (ISSET(status, ENET_EIR_RXF)) {
		if (ifp->if_flags & IFF_RUNNING)
			imxenet_recv(sc);
	}

	/* Try to transmit. */
	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		imxenet_start(ifp);

	return 1;
}
static int
tap_dev_read(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, *n;
	int error = 0, s;

	if (sc == NULL)
		return (ENXIO);

	getnanotime(&sc->sc_atime);

	ifp = &sc->sc_ec.ec_if;
	if ((ifp->if_flags & IFF_UP) == 0)
		return (EHOSTDOWN);

	/*
	 * In the TAP_NBIO case, we have to make sure we won't be sleeping
	 */
	if ((sc->sc_flags & TAP_NBIO) != 0) {
		if (!mutex_tryenter(&sc->sc_rdlock))
			return (EWOULDBLOCK);
	} else {
		mutex_enter(&sc->sc_rdlock);
	}

	s = splnet();
	if (IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/*
		 * We must release the lock before sleeping, and re-acquire it
		 * after.
		 */
		mutex_exit(&sc->sc_rdlock);
		if (sc->sc_flags & TAP_NBIO)
			error = EWOULDBLOCK;
		else
			error = tsleep(sc, PSOCK|PCATCH, "tap", 0);
		splx(s);

		if (error != 0)
			return (error);
		/* The device might have been downed */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (EHOSTDOWN);
		if ((sc->sc_flags & TAP_NBIO)) {
			if (!mutex_tryenter(&sc->sc_rdlock))
				return (EWOULDBLOCK);
		} else {
			mutex_enter(&sc->sc_rdlock);
		}
		s = splnet();
	}

	IFQ_DEQUEUE(&ifp->if_snd, m);
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
	if (m == NULL) {
		error = 0;
		goto out;
	}

	ifp->if_opackets++;
	bpf_mtap(ifp, m);

	/*
	 * One read is one packet.
	 */
	do {
		error = uiomove(mtod(m, void *),
		    min(m->m_len, uio->uio_resid), uio);
		m = n = m_free(m);
	} while (m != NULL && uio->uio_resid > 0 && error == 0);

	if (m != NULL)
		m_freem(m);

out:
	mutex_exit(&sc->sc_rdlock);
	return (error);
}
static void
smc_start_locked(struct ifnet *ifp)
{
	struct smc_softc *sc;
	struct mbuf *m;
	u_int len, npages, spin_count;

	sc = ifp->if_softc;
	SMC_ASSERT_LOCKED(sc);

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * Grab the next packet.  If it's too big, drop it.
	 */
	IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
	len = m_length(m, NULL);
	len += (len & 1);
	if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		if_printf(ifp, "large packet discarded\n");
		++ifp->if_oerrors;
		m_freem(m);
		return; /* XXX readcheck? */
	}

	/*
	 * Flag that we're busy.
	 */
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	sc->smc_pending = m;

	/*
	 * Work out how many 256 byte "pages" we need.  We have to include the
	 * control data for the packet in this calculation.
	 */
	npages = (len + PKT_CTRL_DATA_LEN) >> 8;
	if (npages == 0)
		npages = 1;

	/*
	 * Request memory.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages);

	/*
	 * Spin briefly to see if the allocation succeeds.
	 */
	spin_count = TX_ALLOC_WAIT_TIME;
	do {
		if (smc_read_1(sc, IST) & ALLOC_INT) {
			smc_write_1(sc, ACK, ALLOC_INT);
			break;
		}
	} while (--spin_count);

	/*
	 * If the allocation is taking too long, unmask the alloc interrupt
	 * and wait.
	 */
	if (spin_count == 0) {
		sc->smc_mask |= ALLOC_INT;
		if ((ifp->if_capenable & IFCAP_POLLING) == 0)
			smc_write_1(sc, MSK, sc->smc_mask);
		return;
	}

	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
}
static int
emac_intr(void *arg)
{
	struct emac_softc *sc = (struct emac_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	u_int32_t imr, isr, rsr, ctl;
	int bi;

	imr = ~EMAC_READ(ETH_IMR);
	if (!(imr & (ETH_ISR_RCOM|ETH_ISR_TBRE|ETH_ISR_TIDLE|
	    ETH_ISR_RBNA|ETH_ISR_ROVR))) {
		// interrupt not enabled, can't be us
		return 0;
	}

	isr = EMAC_READ(ETH_ISR) & imr;
	rsr = EMAC_READ(ETH_RSR);		// get receive status register
	DPRINTFN(2, ("%s: isr=0x%08X rsr=0x%08X imr=0x%08X\n",
	    __FUNCTION__, isr, rsr, imr));

	if (isr & ETH_ISR_RBNA) {		// out of receive buffers
		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear interrupt
		ctl = EMAC_READ(ETH_CTL);	// get current control register value
		EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);	// disable receiver
		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear BNA bit
		EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);	// re-enable receiver
		ifp->if_ierrors++;
		ifp->if_ipackets++;
		DPRINTFN(1,("%s: out of receive buffers\n", __FUNCTION__));
	}
	if (isr & ETH_ISR_ROVR) {
		EMAC_WRITE(ETH_RSR, ETH_RSR_OVR);	// clear interrupt
		ifp->if_ierrors++;
		ifp->if_ipackets++;
		DPRINTFN(1,("%s: receive overrun\n", __FUNCTION__));
	}

	if (isr & ETH_ISR_RCOM) {		// packet has been received!
		uint32_t nfo;
		// @@@ if memory is NOT coherent, then we're in trouble @@@@
		// bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		// printf("## RDSC[%i].ADDR=0x%08X\n", sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Addr);
		DPRINTFN(2,("#2 RDSC[%i].INFO=0x%08X\n", sc->rxqi % RX_QLEN,
		    sc->RDSC[sc->rxqi % RX_QLEN].Info));
		while (sc->RDSC[(bi = sc->rxqi % RX_QLEN)].Addr & ETH_RDSC_F_USED) {
			int fl;
			struct mbuf *m;

			nfo = sc->RDSC[bi].Info;
			fl = (nfo & ETH_RDSC_I_LEN) - 4;
			DPRINTFN(2,("## nfo=0x%08X\n", nfo));

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL)
				MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap,
				    0, MCLBYTES, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap);
				sc->rxq[bi].m->m_pkthdr.rcvif = ifp;
				sc->rxq[bi].m->m_pkthdr.len =
				    sc->rxq[bi].m->m_len = fl;
#if NBPFILTER > 0
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, sc->rxq[bi].m);
#endif /* NBPFILTER > 0 */
				DPRINTFN(2,("received %u bytes packet\n", fl));
				(*ifp->if_input)(ifp, sc->rxq[bi].m);
				if (mtod(m, intptr_t) & 3) {
					m_adj(m, mtod(m, intptr_t) & 3);
				}
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap, m->m_ext.ext_buf,
				    MCLBYTES, NULL, BUS_DMA_NOWAIT);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap,
				    0, MCLBYTES, BUS_DMASYNC_PREREAD);
				sc->RDSC[bi].Info = 0;
				sc->RDSC[bi].Addr =
				    sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr |
				    (bi == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				if (m != NULL) {
					m_freem(m);
				}
				ifp->if_ierrors++;
			}
			sc->rxqi++;
		}
		// bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		emac_ifstart(ifp);
	}
#if 0
	// reloop
	irq = EMAC_READ(IntStsC);
	if ((irq & (IntSts_RxSQ|IntSts_ECI)) != 0)
		goto begin;
#endif

	return (1);
}
void
vnet_start(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct mbuf *m;
	paddr_t pa;
	caddr_t buf;
	uint64_t tx_head, tx_tail, tx_state;
	u_int start, prod, count;
	int err;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * We cannot transmit packets until a VIO connection has been
	 * established.
	 */
	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
		return;

	/*
	 * Make sure there is room in the LDC transmit queue to send a
	 * DRING_DATA message.
	 */
	err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (err != H_EOK)
		return;
	tx_tail += sizeof(struct ldc_pkt);
	tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(struct ldc_pkt)) - 1);
	if (tx_tail == tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	if (sc->sc_xfer_mode == VIO_DESC_MODE) {
		vnet_start_desc(ifp);
		return;
	}

	start = prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
	while (sc->sc_vd->vd_desc[prod].hdr.dstate == VIO_DESC_FREE) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		m_copydata(m, 0, m->m_pkthdr.len, buf + VNET_ETHER_ALIGN);
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_int(&map->lm_count);

		sc->sc_vd->vd_desc[prod].nbytes = max(m->m_pkthdr.len, 60);
		sc->sc_vd->vd_desc[prod].ncookies = 1;
		sc->sc_vd->vd_desc[prod].cookie[0].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[prod].cookie[0].size = 2048;
		membar_producer();
		sc->sc_vd->vd_desc[prod].hdr.dstate = VIO_DESC_READY;

		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		sc->sc_tx_prod++;
		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}

	membar_producer();

	if (start != prod && sc->sc_peer_state != VIO_DP_ACTIVE) {
		vnet_send_dring_data(sc, start);
		ifp->if_timer = 5;
	}
}
void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	struct mbuf *m;
	bus_dmamap_t dm;
	u_int eopi = ~0;
	u_int seg;
	u_int txfree;
	int txstart = -1;
	int error;
	bool pad;
	u_int mlen;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
	    IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	if (sc->sc_txnext >= sc->sc_txhead)
		txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
	else
		txfree = sc->sc_txhead - sc->sc_txnext - 1;

	for (;;) {
		if (txfree <= CPSW_TXFRAGS) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);

		dm = rdp->tx_dm[sc->sc_txnext];
		error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			break;

		case EFBIG: /* mbuf chain is too fragmented */
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_bdt, dm, m,
			    BUS_DMA_NOWAIT) == 0)
				break;

			/* FALLTHROUGH */
		default:
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		mlen = dm->dm_mapsize;
		pad = mlen < CPSW_PAD_LEN;

		KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
		rdp->tx_mb[sc->sc_txnext] = m;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (txstart == -1)
			txstart = sc->sc_txnext;
		eopi = sc->sc_txnext;
		for (seg = 0; seg < dm->dm_nsegs; seg++) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = dm->dm_segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = dm->dm_segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = 0;

			if (seg == 0) {
				bd.flags = CPDMA_BD_OWNER | CPDMA_BD_SOP;
				bd.pktlen = MAX(mlen, CPSW_PAD_LEN);
			}

			if (seg == dm->dm_nsegs - 1 && !pad)
				bd.flags |= CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
		if (pad) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = sc->sc_txpad_pa;
			bd.bufoff = 0;
			bd.buflen = CPSW_PAD_LEN - mlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
	}

	if (txstart >= 0) {
		ifp->if_timer = 5;
		/* terminate the new chain */
		KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
		cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);

		/* link the new chain on */
		cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
		    cpsw_txdesc_paddr(sc, txstart));

		if (sc->sc_txeoq) {
			/* kick the dma engine */
			sc->sc_txeoq = false;
			bus_space_write_4(sc->sc_bst, sc->sc_bsh,
			    CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, txstart));
		}
	}
}