static int
gx_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct gx_softc *sc;

	sc = ifp->if_softc;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING) {
		m_freem(m);
		return (0);
	}

	GXEMUL_ETHER_LOCK(sc);
	GXEMUL_ETHER_DEV_WRITE(GXEMUL_ETHER_DEV_LENGTH, m->m_pkthdr.len);
	m_copydata(m, 0, m->m_pkthdr.len, (void *)(uintptr_t)
	    GXEMUL_ETHER_DEV_FUNCTION(GXEMUL_ETHER_DEV_BUFFER));
	GXEMUL_ETHER_DEV_WRITE(GXEMUL_ETHER_DEV_COMMAND,
	    GXEMUL_ETHER_DEV_COMMAND_TX);
	GXEMUL_ETHER_UNLOCK(sc);

	ETHER_BPF_MTAP(ifp, m);

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);

	m_freem(m);

	return (0);
}
/* Async. stream output */
static void
fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp)
{
	struct mbuf *m;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	int i = 0;

	xfer = NULL;
	xferq = fwe->fd.fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
	    (ifp->if_snd.ifq_head != NULL)) {
		FWE_LOCK(fwe);
		xfer = STAILQ_FIRST(&fwe->xferlist);
		if (xfer == NULL) {
#if 0
			printf("if_fwe: lack of xfer\n");
#endif
			FWE_UNLOCK(fwe);
			break;
		}
		STAILQ_REMOVE_HEAD(&fwe->xferlist, link);
		FWE_UNLOCK(fwe);

		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			FWE_LOCK(fwe);
			STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
			FWE_UNLOCK(fwe);
			break;
		}
		BPF_MTAP(ifp, m);

		/* keep ip packet alignment for alpha */
		M_PREPEND(m, ETHER_ALIGN, M_NOWAIT);
		if (m == NULL) {
			/* M_PREPEND frees the chain on failure. */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			FWE_LOCK(fwe);
			STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
			FWE_UNLOCK(fwe);
			break;
		}
		fp = &xfer->send.hdr;
		*(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr;
		fp->mode.stream.len = m->m_pkthdr.len;
		xfer->mbuf = m;
		xfer->send.pay_len = m->m_pkthdr.len;

		if (fw_asyreq(fwe->fd.fc, -1, xfer) != 0) {
			/* error */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* XXX set error code */
			fwe_output_callback(xfer);
		} else {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fwe->fd.fc);
}
static void
gx_rx_intr(void *arg)
{
	struct gx_softc *sc = arg;

	GXEMUL_ETHER_LOCK(sc);
	for (;;) {
		uint64_t status, length;
		struct mbuf *m;

		/*
		 * XXX
		 * Limit number of packets received at once?
		 */
		status = GXEMUL_ETHER_DEV_READ(GXEMUL_ETHER_DEV_STATUS);
		if (status == GXEMUL_ETHER_DEV_STATUS_RX_MORE) {
			GXEMUL_ETHER_DEV_WRITE(GXEMUL_ETHER_DEV_COMMAND,
			    GXEMUL_ETHER_DEV_COMMAND_RX);
			continue;
		}
		if (status != GXEMUL_ETHER_DEV_STATUS_RX_OK)
			break;
		length = GXEMUL_ETHER_DEV_READ(GXEMUL_ETHER_DEV_LENGTH);
		if (length > MCLBYTES - ETHER_ALIGN) {
			if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->sc_dev,
			    "no memory for receive mbuf.\n");
			if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1);
			GXEMUL_ETHER_UNLOCK(sc);
			return;
		}

		/* Align incoming frame so IP headers are aligned. */
		m->m_data += ETHER_ALIGN;
		memcpy(m->m_data, (const void *)(uintptr_t)
		    GXEMUL_ETHER_DEV_FUNCTION(GXEMUL_ETHER_DEV_BUFFER),
		    length);

		m->m_pkthdr.rcvif = sc->sc_ifp;
		m->m_pkthdr.len = m->m_len = length;

		if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);

		GXEMUL_ETHER_UNLOCK(sc);
		(*sc->sc_ifp->if_input)(sc->sc_ifp, m);
		GXEMUL_ETHER_LOCK(sc);
	}
	GXEMUL_ETHER_UNLOCK(sc);
}
static void
vtbe_proc_rx(struct vtbe_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[DESC_COUNT];
	struct ifnet *ifp;
	struct uio uio;
	struct mbuf *m;
	int iolen;
	int i;
	int n;

	ifp = sc->ifp;

	n = vq_getchain(sc->beri_mem_offset, vq, iov, DESC_COUNT, NULL);
	KASSERT(n >= 1 && n <= DESC_COUNT, ("wrong n %d", n));

	iolen = 0;
	for (i = 1; i < n; i++) {
		iolen += iov[i].iov_len;
	}

	uio.uio_resid = iolen;
	uio.uio_iov = &iov[1];
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_iovcnt = (n - 1);
	uio.uio_rw = UIO_WRITE;

	if ((m = m_uiotombuf(&uio, M_NOWAIT, 0, ETHER_ALIGN,
	    M_PKTHDR)) == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		goto done;
	}

	m->m_pkthdr.rcvif = ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	CURVNET_SET(ifp->if_vnet);
	VTBE_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	VTBE_LOCK(sc);
	CURVNET_RESTORE();

done:
	vq_relchain(vq, iov, n, iolen + sc->hdrsize);
}
static void
fwe_output_callback(struct fw_xfer *xfer)
{
	struct fwe_softc *fwe;
	struct ifnet *ifp;
	int s;

	fwe = (struct fwe_softc *)xfer->sc;
	ifp = fwe->eth_softc.ifp;
	/* XXX error check */
	FWEDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	FWE_LOCK(fwe);
	STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link);
	FWE_UNLOCK(fwe);
	splx(s);

	/* for queue full */
	if (ifp->if_snd.ifq_head != NULL)
		fwe_start(ifp);
}
static void
qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp)
{
	qla_tx_buf_t *txb;
	uint32_t tx_idx = tx_comp->tid_lo;

	if (tx_idx >= NUM_TX_DESCRIPTORS) {
		ha->qla_initiate_recovery = 1;
		return;
	}

	txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];

	if (txb->m_head) {
		if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
		bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ha->tx_tag, txb->map);
		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	ha->tx_ring[txr_idx].txr_done++;

	if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
		ha->tx_ring[txr_idx].txr_done = 0;
}
static void
fwe_start(struct ifnet *ifp)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe;
	int s;

	FWEDEBUG(ifp, "starting\n");

	if (fwe->dma_ch < 0) {
		struct mbuf *m = NULL;

		FWEDEBUG(ifp, "not ready\n");

		s = splimp();
		do {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL) {
				m_freem(m);
				/* Count each dropped packet as an error. */
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			}
		} while (m != NULL);
		splx(s);

		return;
	}

	s = splimp();
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	if (ifp->if_snd.ifq_len != 0)
		fwe_as_output(fwe, ifp);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	splx(s);
}
static void
octm_rx_intr(void *arg)
{
	struct octm_softc *sc = arg;
	cvmx_mixx_isr_t mixx_isr;
	int len;

	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(sc->sc_port));
	if (!mixx_isr.s.irthresh) {
		device_printf(sc->sc_dev, "stray interrupt.\n");
		return;
	}

	for (;;) {
		struct mbuf *m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->sc_dev,
			    "no memory for receive mbuf.\n");
			return;
		}

		len = cvmx_mgmt_port_receive(sc->sc_port, MCLBYTES,
		    m->m_data);
		if (len > 0) {
			m->m_pkthdr.rcvif = sc->sc_ifp;
			m->m_pkthdr.len = m->m_len = len;

			if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);

			(*sc->sc_ifp->if_input)(sc->sc_ifp, m);
			continue;
		}
		m_freem(m);

		if (len == 0)
			break;

		if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
	}

	/* Acknowledge interrupts. */
	cvmx_write_csr(CVMX_MIXX_ISR(sc->sc_port), mixx_isr.u64);
	cvmx_read_csr(CVMX_MIXX_ISR(sc->sc_port));
}
static void
cdce_ncm_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	uint16_t x;
	uint8_t temp;
	int actlen;
	int aframes;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

		DPRINTFN(10, "transfer complete: "
		    "%u bytes in %u frames\n", actlen, aframes);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
		for (x = 0; x != CDCE_NCM_TX_FRAMES_MAX; x++) {
			temp = cdce_ncm_fill_tx_frames(xfer, x);
			if (temp == 0)
				break;
			if (temp == 1) {
				x++;
				break;
			}
		}

		if (x != 0) {
#ifdef USB_DEBUG
			usbd_xfer_set_interval(xfer, cdce_tx_interval);
#endif
			usbd_xfer_set_frames(xfer, x);
			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(10, "Transfer error: %s\n", usbd_errstr(error));

		/* update error counter */
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		if (error != USB_ERR_CANCELLED) {
			if (usbd_get_mode(sc->sc_ue.ue_udev) ==
			    USB_MODE_HOST) {
				/* try to clear stall first */
				usbd_xfer_set_stall(xfer);
				usbd_xfer_set_frames(xfer, 0);
				usbd_transfer_submit(xfer);
			}
		}
		break;
	}
}
/*
 * Device timeout/watchdog routine.  Entered if the device neglects to
 * generate an interrupt after a transmit has been started on it.
 */
static void
ed_watchdog(struct ed_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;
	log(LOG_ERR, "%s: device timeout\n", ifp->if_xname);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	ed_reset(ifp);
}
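/*
 * Illustrative sketch, not part of the original source: ed(4)-style drivers
 * normally drive ed_watchdog() from a periodic callout that counts down a
 * per-softc transmit timer (edintr() later in this file clears
 * sc->tx_timer when a transmit completes).  The callout field name
 * (sc->tick_ch) and the one-second period are assumptions made for this
 * sketch.
 */
static void
ed_tick_sketch(void *xsc)
{
	struct ed_softc *sc = xsc;

	ED_ASSERT_LOCKED(sc);
	/* A transmit was started but never completed: declare a timeout. */
	if (sc->tx_timer != 0 && --sc->tx_timer == 0)
		ed_watchdog(sc);
	callout_reset(&sc->tick_ch, hz, ed_tick_sketch, sc);
}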
static int
octm_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct octm_softc *sc;
	cvmx_mgmt_port_result_t result;

	sc = ifp->if_softc;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) {
		m_freem(m);
		return (0);
	}

	result = cvmx_mgmt_port_sendm(sc->sc_port, m);

	if (result == CVMX_MGMT_PORT_SUCCESS) {
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	} else
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	m_freem(m);

	switch (result) {
	case CVMX_MGMT_PORT_SUCCESS:
		return (0);
	case CVMX_MGMT_PORT_NO_MEMORY:
		return (ENOBUFS);
	case CVMX_MGMT_PORT_INVALID_PARAM:
		return (ENXIO);
	case CVMX_MGMT_PORT_INIT_ERROR:
		return (EIO);
	default:
		return (EDOOFUS);
	}
}
/*
 * Process a received frame in monitor mode.
 */
static int
monitor_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ifnet *ifp = vap->iv_ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if (ieee80211_radiotap_active_vap(vap))
		ieee80211_radiotap_rx(vap, m);
	m_freem(m);

	return -1;
}
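/*
 * Hedged sketch, not from the original source: net80211 installs this
 * method when a vap is brought up in monitor opmode, roughly as below.
 * The attach-function name is an assumption; it is shown only to place
 * monitor_input() in context.
 */
static void
monitor_vattach_sketch(struct ieee80211vap *vap)
{
	/* Deliver every received frame through monitor_input(). */
	vap->iv_input = monitor_input;
}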
static int
usie_if_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	int err;

	DPRINTF("proto=%x\n", dst->sa_family);

	switch (dst->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* fall through */
#endif
	case AF_INET:
		break;

		/* silently drop dhclient packets */
	case AF_UNSPEC:
		m_freem(m);
		return (0);

		/* drop other packet types */
	default:
		m_freem(m);
		return (EAFNOSUPPORT);
	}

	err = (ifp->if_transmit)(ifp, m);
	if (err) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENOBUFS);
	}
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

	return (0);
}
static void
ffec_harvest_stats(struct ffec_softc *sc)
{
	struct ifnet *ifp;

	/* We don't need to harvest too often. */
	if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL)
		return;

	/*
	 * Try to avoid harvesting unless the IDLE flag is on, but if it has
	 * been too long just go ahead and do it anyway, the worst that'll
	 * happen is we'll lose a packet count or two as we clear at the end.
	 */
	if (sc->stats_harvest_count < (2 * STATS_HARVEST_INTERVAL) &&
	    ((RD4(sc, FEC_MIBC_REG) & FEC_MIBC_IDLE) == 0))
		return;

	sc->stats_harvest_count = 0;
	ifp = sc->ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, RD4(sc, FEC_RMON_R_PACKETS));
	if_inc_counter(ifp, IFCOUNTER_IMCASTS, RD4(sc, FEC_RMON_R_MC_PKT));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    RD4(sc, FEC_RMON_R_CRC_ALIGN) + RD4(sc, FEC_RMON_R_UNDERSIZE) +
	    RD4(sc, FEC_RMON_R_OVERSIZE) + RD4(sc, FEC_RMON_R_FRAG) +
	    RD4(sc, FEC_RMON_R_JAB));

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, RD4(sc, FEC_RMON_T_PACKETS));
	if_inc_counter(ifp, IFCOUNTER_OMCASTS, RD4(sc, FEC_RMON_T_MC_PKT));
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    RD4(sc, FEC_RMON_T_CRC_ALIGN) + RD4(sc, FEC_RMON_T_UNDERSIZE) +
	    RD4(sc, FEC_RMON_T_OVERSIZE) + RD4(sc, FEC_RMON_T_FRAG) +
	    RD4(sc, FEC_RMON_T_JAB));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, RD4(sc, FEC_RMON_T_COL));

	ffec_clear_stats(sc);
}
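/*
 * Hedged sketch, not part of the original source: harvesting like the
 * above is typically driven from the driver's periodic tick callout, so
 * the hardware MIB registers are drained at a fixed rate rather than per
 * packet.  The callout field name (sc->ffec_callout) is an assumption;
 * FFEC_LOCK/FFEC_UNLOCK are the driver's own lock macros.
 */
static void
ffec_tick_sketch(void *arg)
{
	struct ffec_softc *sc = arg;

	FFEC_LOCK(sc);
	ffec_harvest_stats(sc);
	FFEC_UNLOCK(sc);
	callout_reset(&sc->ffec_callout, hz, ffec_tick_sketch, sc);
}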
/*
 * FDDI output routine.
 * Encapsulate a packet of type family for the local net.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 */
static int
fddi_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	u_int16_t type;
	int loop_copy = 0, error = 0, hdrcmplt = 0;
	u_char esrc[FDDI_ADDR_LEN], edst[FDDI_ADDR_LEN];
	struct fddi_header *fh;
#if defined(INET) || defined(INET6)
	int is_gw = 0;
#endif

#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error)
		senderr(error);
#endif

	if (ifp->if_flags & IFF_MONITOR)
		senderr(ENETDOWN);
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
		senderr(ENETDOWN);
	getmicrotime(&ifp->if_lastchange);

#if defined(INET) || defined(INET6)
	if (ro != NULL)
		is_gw = (ro->ro_flags & RT_HAS_GW) != 0;
#endif

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET: {
		error = arpresolve(ifp, is_gw, m, dst, edst, NULL, NULL);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IP);
		break;
	}
	case AF_ARP:
	{
		struct arphdr *ah;

		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_ETHER);

		loop_copy = -1; /* if this is for us, don't do it */

		switch (ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			type = htons(ETHERTYPE_REVARP);
			break;
		case ARPOP_REQUEST:
		case ARPOP_REPLY:
		default:
			type = htons(ETHERTYPE_ARP);
			break;
		}

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, edst, FDDI_ADDR_LEN);
		else
			bcopy(ar_tha(ah), edst, FDDI_ADDR_LEN);
	}
	break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		error = nd6_resolve(ifp, is_gw, m, dst, edst, NULL, NULL);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IPV6);
		break;
#endif /* INET6 */
	case pseudo_AF_HDRCMPLT:
	{
		const struct ether_header *eh;

		hdrcmplt = 1;
		eh = (const struct ether_header *)dst->sa_data;
		bcopy(eh->ether_shost, esrc, FDDI_ADDR_LEN);
	}
	/* FALLTHROUGH */
	case AF_UNSPEC:
	{
		const struct ether_header *eh;

		loop_copy = -1;
		eh = (const struct ether_header *)dst->sa_data;
		bcopy(eh->ether_dhost, edst, FDDI_ADDR_LEN);
		if (*edst & 1)
			m->m_flags |= (M_BCAST|M_MCAST);
		type = eh->ether_type;
		break;
	}
	case AF_IMPLINK:
	{
		fh = mtod(m, struct fddi_header *);
		error = EPROTONOSUPPORT;
		switch (fh->fddi_fc & (FDDIFC_C|FDDIFC_L|FDDIFC_F)) {
		case FDDIFC_LLC_ASYNC:
			/* legal priorities are 0 through 7 */
			if ((fh->fddi_fc & FDDIFC_Z) > 7)
				goto bad;
			break;
		case FDDIFC_LLC_SYNC:
			/* FDDIFC_Z bits reserved, must be zero */
			if (fh->fddi_fc & FDDIFC_Z)
				goto bad;
			break;
		case FDDIFC_SMT:
			/* FDDIFC_Z bits must be non zero */
			if ((fh->fddi_fc & FDDIFC_Z) == 0)
				goto bad;
			break;
		default:
			/* anything else is too dangerous */
			goto bad;
		}
		error = 0;
		if (fh->fddi_dhost[0] & 1)
			m->m_flags |= (M_BCAST|M_MCAST);
		goto queue_it;
	}
	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		senderr(EAFNOSUPPORT);
	}

	/*
	 * Add LLC header.
	 */
	if (type != 0) {
		struct llc *l;

		M_PREPEND(m, LLC_SNAPFRAMELEN, M_NOWAIT);
		if (m == NULL)
			senderr(ENOBUFS);
		l = mtod(m, struct llc *);
		l->llc_control = LLC_UI;
		l->llc_dsap = l->llc_ssap = LLC_SNAP_LSAP;
		l->llc_snap.org_code[0] =
		    l->llc_snap.org_code[1] =
		    l->llc_snap.org_code[2] = 0;
		l->llc_snap.ether_type = htons(type);
	}

	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 */
	M_PREPEND(m, FDDI_HDR_LEN, M_NOWAIT);
	if (m == NULL)
		senderr(ENOBUFS);
	fh = mtod(m, struct fddi_header *);
	fh->fddi_fc = FDDIFC_LLC_ASYNC|FDDIFC_LLC_PRIO4;
	bcopy((caddr_t)edst, (caddr_t)fh->fddi_dhost, FDDI_ADDR_LEN);
queue_it:
	if (hdrcmplt)
		bcopy((caddr_t)esrc, (caddr_t)fh->fddi_shost, FDDI_ADDR_LEN);
	else
		bcopy(IF_LLADDR(ifp), (caddr_t)fh->fddi_shost,
		    FDDI_ADDR_LEN);

	/*
	 * If a simplex interface, and the packet is being sent to our
	 * Ethernet address or a broadcast address, loopback a copy.
	 * XXX To make a simplex device behave exactly like a duplex
	 * device, we should copy in the case of sending to our own
	 * ethernet address (thus letting the original actually appear
	 * on the wire).  However, we don't do that here for security
	 * reasons and compatibility with the original behavior.
	 */
	if ((ifp->if_flags & IFF_SIMPLEX) && (loop_copy != -1)) {
		if ((m->m_flags & M_BCAST) || (loop_copy > 0)) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			/* Guard against m_copym() failure. */
			if (n != NULL)
				(void) if_simloop(ifp, n, dst->sa_family,
				    FDDI_HDR_LEN);
		} else if (bcmp(fh->fddi_dhost, fh->fddi_shost,
		    FDDI_ADDR_LEN) == 0) {
			(void) if_simloop(ifp, m, dst->sa_family,
			    FDDI_HDR_LEN);
			return (0);	/* XXX */
		}
	}

	error = (ifp->if_transmit)(ifp, m);
	if (error)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	return (error);

bad:
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if (m)
		m_freem(m);
	return (error);
}
static void
ffec_rxfinish_onebuf(struct ffec_softc *sc, int len)
{
	struct mbuf *m, *newmbuf;
	struct ffec_bufmap *bmap;
	uint8_t *dst, *src;
	int error;

	/*
	 * First try to get a new mbuf to plug into this slot in the rx ring.
	 * If that fails, drop the current packet and recycle the current
	 * mbuf, which is still mapped and loaded.
	 */
	if ((newmbuf = ffec_alloc_mbufcl(sc)) == NULL) {
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		ffec_setup_rxdesc(sc, sc->rx_idx,
		    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
		return;
	}

	/*
	 * Unfortunately, the protocol headers need to be aligned on a 32-bit
	 * boundary for the upper layers.  The hardware requires receive
	 * buffers to be 16-byte aligned.  The ethernet header is 14 bytes,
	 * leaving the protocol header unaligned.  We used m_adj() after
	 * allocating the buffer to leave empty space at the start of the
	 * buffer, now we'll use the alignment agnostic bcopy() routine to
	 * shuffle all the data backwards 2 bytes and adjust m_data.
	 *
	 * XXX imx6 hardware is able to do this 2-byte alignment by setting
	 * the SHIFT16 bit in the RACC register.  Older hardware doesn't
	 * have that feature, but for them could we speed this up by copying
	 * just the protocol headers into their own small mbuf then chaining
	 * the cluster to it?  That way we'd only need to copy like 64 bytes
	 * or whatever the biggest header is, instead of the whole
	 * 1530ish-byte frame.
	 */

	FFEC_UNLOCK(sc);

	bmap = &sc->rxbuf_map[sc->rx_idx];
	len -= ETHER_CRC_LEN;
	bus_dmamap_sync(sc->rxbuf_tag, bmap->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, bmap->map);
	m = bmap->mbuf;
	bmap->mbuf = NULL;
	m->m_len = len;
	m->m_pkthdr.len = len;
	m->m_pkthdr.rcvif = sc->ifp;

	src = mtod(m, uint8_t*);
	dst = src - ETHER_ALIGN;
	bcopy(src, dst, len);
	m->m_data = dst;
	sc->ifp->if_input(sc->ifp, m);

	FFEC_LOCK(sc);

	if ((error = ffec_setup_rxbuf(sc, sc->rx_idx, newmbuf)) != 0) {
		device_printf(sc->dev, "ffec_setup_rxbuf error %d\n", error);
		/* XXX Now what?  We've got a hole in the rx ring. */
	}
}
/*
 * Name: qla_rx_intr
 * Function: Handles normal ethernet frames received
 */
static void
qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	sdsp = &ha->hw.sds[sds_idx];
	rx_ring = &ha->rx_ring[r_idx];

	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
		    ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__, sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
			    "%s invalid rxb[%d, %d, 0x%04x]\n",
			    __func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
		    ("%s: [sds_idx]=[%d] mp != NULL\n", __func__, sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
			    "%s mp == NULL [%d, %d, 0x%04x]\n",
			    __func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		if (i == 0) {
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = sgc->pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	mpl->m_len = rem_len;

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	}

	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mpf->m_pkthdr.csum_data = 0xFFFF;
	} else {
		mpf->m_pkthdr.csum_flags = 0;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	mpf->m_pkthdr.flowid = sgc->rss_hash;
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);

	(*ifp->if_input)(ifp, mpf);

	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return;
}
/*
 * Name: qla_lro_intr
 * Function: Handles LRO (large receive offload) coalesced TCP frames
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];

	ha->lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];

	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->lro_bytes += pkt_length;

	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
		    ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__, sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
			    "%s invalid rxb[%d, %d, 0x%04x]\n",
			    __func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
		    ("%s: [sds_idx]=[%d] mp != NULL\n", __func__, sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
			    "%s mp == NULL [%d, %d, 0x%04x]\n",
			    __func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	mpl->m_len = rem_len;

	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
		    sgc->payload_length;

		ip->ip_len = htons(iplen);

		ha->ipv4_lro++;
	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;
	} else {
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return (0);
	}

	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(*ifp->if_input)(ifp, mpf);

	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}
static void
ipheth_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ipheth_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct usb_page_cache *pc;
	struct mbuf *m;
	uint8_t x;
	int actlen;
	int aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTFN(1, "\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
		    actlen, aframes);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		/* free all previous TX buffers */
		ipheth_free_queue(sc->sc_tx_buf, IPHETH_TX_FRAMES_MAX);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		for (x = 0; x != IPHETH_TX_FRAMES_MAX; x++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

			if (m == NULL)
				break;

			usbd_xfer_set_frame_offset(xfer,
			    x * IPHETH_BUF_SIZE, x);

			pc = usbd_xfer_get_frame(xfer, x);

			sc->sc_tx_buf[x] = m;

			if (m->m_pkthdr.len > IPHETH_BUF_SIZE)
				m->m_pkthdr.len = IPHETH_BUF_SIZE;

			usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len);

			usbd_xfer_set_frame_len(xfer, x, IPHETH_BUF_SIZE);

			if (IPHETH_BUF_SIZE != m->m_pkthdr.len) {
				usbd_frame_zero(pc, m->m_pkthdr.len,
				    IPHETH_BUF_SIZE - m->m_pkthdr.len);
			}

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him:
			 */
			BPF_MTAP(ifp, m);
		}
		if (x != 0) {
			usbd_xfer_set_frames(xfer, x);
			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error));

		/* free all previous TX buffers */
		ipheth_free_queue(sc->sc_tx_buf, IPHETH_TX_FRAMES_MAX);

		/* count output errors */
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
static void
vtbe_txstart_locked(struct vtbe_softc *sc)
{
	struct virtio_net_hdr_mrg_rxbuf *vnh;
	struct iovec iov[DESC_COUNT];
	struct vqueue_info *vq;
	struct iovec *riov;
	struct ifnet *ifp;
	struct mbuf *m;
	struct uio uio;
	int enqueued;
	int iolen;
	int error;
	int *addr;
	int reg;
	int len;
	int n;

	VTBE_ASSERT_LOCKED(sc);

	/* RX queue */
	vq = &sc->vs_queues[0];
	if (!vq_has_descs(vq)) {
		return;
	}

	ifp = sc->ifp;
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		return;
	}

	enqueued = 0;

	if (!vq_ring_ready(vq))
		return;

	vq->vq_save_used = be16toh(vq->vq_used->idx);

	for (;;) {
		if (!vq_has_descs(vq)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			break;
		}

		n = vq_getchain(sc->beri_mem_offset, vq, iov,
		    DESC_COUNT, NULL);
		KASSERT(n >= 1 && n <= DESC_COUNT,
		    ("wrong descriptors num %d", n));

		addr = iov[0].iov_base;
		len = iov[0].iov_len;

		vnh = iov[0].iov_base;
		memset(vnh, 0, sc->hdrsize);
		vnh->num_buffers = htobe16(1);

		iov[0].iov_len -= sc->hdrsize;
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base +
		    sc->hdrsize);

		riov = &iov[0];
		uio.uio_resid = iov[0].iov_len;
		uio.uio_iov = riov;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;

		error = m_mbuftouio(&uio, m, 0);
		if (error)
			panic("m_mbuftouio failed\n");

		iolen = (len - iov[0].iov_len - sc->hdrsize);
		vq_relchain(vq, iov, 0, iolen + sc->hdrsize);
		paddr_unmap((void *)addr, len);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		BPF_MTAP(ifp, m);
		m_freem(m);

		++enqueued;
	}

	if (enqueued != 0) {
		reg = htobe32(VIRTIO_MMIO_INT_VRING);
		WRITE4(sc, VIRTIO_MMIO_INTERRUPT_STATUS, reg);

		PIO_SET(sc->pio_send, Q_INTR, 1);
	}
}
/* Async. stream input */
static void
fwe_as_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	struct ifnet *ifp;
	struct fwe_softc *fwe;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	u_char *c;

	fwe = (struct fwe_softc *)xferq->sc;
	ifp = fwe->eth_softc.ifp;

	/* We do not need a lock here because the bottom half is serialized */
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwe->fd.fc->irx_post != NULL)
			fwe->fd.fc->irx_post(fwe->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("%s: m_getcl failed\n", __FUNCTION__);

		if (sxfer->resp != 0 || fp->mode.stream.len <
		    ETHER_ALIGN + sizeof(struct ether_header)) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		m->m_data += HDR_LEN + ETHER_ALIGN;
		c = mtod(m, u_char *);
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len -
		    ETHER_ALIGN;
		m->m_pkthdr.rcvif = ifp;
#if 0
		FWEDEBUG(ifp, "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n",
		    c[0], c[1], c[2], c[3], c[4], c[5],
		    c[6], c[7], c[8], c[9], c[10], c[11],
		    c[12], c[13], c[14], c[15],
		    c[16], c[17], c[18], c[19],
		    c[20], c[21], c[22], c[23],
		    c[20], c[21], c[22], c[23]);
#endif
		(*ifp->if_input)(ifp, m);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	}
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwe->fd.fc->irx_enable(fwe->fd.fc, fwe->dma_ch);
}
/*
 * Ethernet interface interrupt processor
 */
void
edintr(void *arg)
{
	struct ed_softc *sc = (struct ed_softc *)arg;
	struct ifnet *ifp = sc->ifp;
	u_char isr;
	int count;

	ED_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		ED_UNLOCK(sc);
		return;
	}
	/*
	 * Set NIC to page 0 registers
	 */
	ed_nic_barrier(sc, ED_P0_CR, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA);
	ed_nic_barrier(sc, ED_P0_CR, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * loop until there are no more new interrupts.  When the card goes
	 * away, the hardware will read back 0xff.  Looking at the
	 * interrupts, it would appear that 0xff is impossible as ED_ISR_RST
	 * is normally clear.  ED_ISR_RDC is also normally clear and only
	 * set while we're transferring memory to the card and we're holding
	 * the ED_LOCK (so we can't get into here).
	 */
	while ((isr = ed_nic_inb(sc, ED_P0_ISR)) != 0 && isr != 0xff) {
		/*
		 * reset all the bits that we are 'acknowledging' by writing
		 * a '1' to each bit position that was set (writing a '1'
		 * *clears* the bit)
		 */
		ed_nic_outb(sc, ED_P0_ISR, isr);

		/*
		 * The AX88190 and AX88190A have problems acking an
		 * interrupt and having it clear.  This interferes with the
		 * top-level loop here.  Wait for all the bits to clear.
		 *
		 * We limit this to 5000 iterations.  At 1us per inb/outb,
		 * this translates to about 15ms, which should be plenty of
		 * time, and also gives protection in the card eject case.
		 */
		if (sc->chip_type == ED_CHIP_TYPE_AX88190) {
			count = 5000;		/* 15ms */
			while (count-- && (ed_nic_inb(sc, ED_P0_ISR) & isr)) {
				ed_nic_outb(sc, ED_P0_ISR, 0);
				ed_nic_outb(sc, ED_P0_ISR, isr);
			}
			if (count == 0)
				break;
		}

		/*
		 * Handle transmitter interrupts.  Handle these first because
		 * the receiver will reset the board under some conditions.
		 */
		if (isr & (ED_ISR_PTX | ED_ISR_TXE)) {
			u_char collisions = ed_nic_inb(sc, ED_P0_NCR) & 0x0f;

			/*
			 * Check for transmit error.  If a TX completed with
			 * an error, we end up throwing the packet away.
			 * Really the only error that is possible is
			 * excessive collisions, and in this case it is best
			 * to allow the automatic mechanisms of TCP to back
			 * off the flow.  Of course, with UDP we're screwed,
			 * but this is expected when a network is heavily
			 * loaded.
			 */
			(void) ed_nic_inb(sc, ED_P0_TSR);
			if (isr & ED_ISR_TXE) {
				u_char tsr;

				/*
				 * Excessive collisions (16)
				 */
				tsr = ed_nic_inb(sc, ED_P0_TSR);
				if ((tsr & ED_TSR_ABT) && (collisions == 0)) {
					/*
					 * When collisions total 16, the
					 * P0_NCR will indicate 0, and the
					 * TSR_ABT is set.
					 */
					collisions = 16;
					sc->mibdata.dot3StatsExcessiveCollisions++;
					sc->mibdata.dot3StatsCollFrequencies[15]++;
				}
				if (tsr & ED_TSR_OWC)
					sc->mibdata.dot3StatsLateCollisions++;
				if (tsr & ED_TSR_CDH)
					sc->mibdata.dot3StatsSQETestErrors++;
				if (tsr & ED_TSR_CRS)
					sc->mibdata.dot3StatsCarrierSenseErrors++;
				if (tsr & ED_TSR_FU)
					sc->mibdata.dot3StatsInternalMacTransmitErrors++;

				/*
				 * update output errors counter
				 */
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			} else {
				/*
				 * Update total number of successfully
				 * transmitted packets.
				 */
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			}

			/*
			 * reset tx busy and output active flags
			 */
			sc->xmit_busy = 0;
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

			/*
			 * clear watchdog timer
			 */
			sc->tx_timer = 0;

			/*
			 * Add in total number of collisions on last
			 * transmission.
			 */
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS, collisions);
			switch (collisions) {
			case 0:
			case 16:
				break;
			case 1:
				sc->mibdata.dot3StatsSingleCollisionFrames++;
				sc->mibdata.dot3StatsCollFrequencies[0]++;
				break;
			default:
				sc->mibdata.dot3StatsMultipleCollisionFrames++;
				sc->mibdata.dot3StatsCollFrequencies[collisions - 1]++;
				break;
			}

			/*
			 * Decrement buffer in-use count if not zero (can
			 * only be zero if a transmitter interrupt occurred
			 * while not actually transmitting).  If data is
			 * ready to transmit, start it transmitting,
			 * otherwise defer until after handling receiver.
			 */
			if (sc->txb_inuse && --sc->txb_inuse)
				ed_xmit(sc);
		}

		/*
		 * Handle receiver interrupts
		 */
		if (isr & (ED_ISR_PRX | ED_ISR_RXE | ED_ISR_OVW)) {
			/*
			 * Overwrite warning.  In order to make sure that a
			 * lockup of the local DMA hasn't occurred, we reset
			 * and re-init the NIC.  The NSC manual suggests only
			 * a partial reset/re-init is necessary - but some
			 * chips seem to want more.  The DMA lockup has been
			 * seen only with early rev chips - Methinks this bug
			 * was fixed in later revs.  -DG
			 */
			if (isr & ED_ISR_OVW) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#ifdef DIAGNOSTIC
				log(LOG_WARNING,
				    "%s: warning - receiver ring buffer overrun\n",
				    ifp->if_xname);
#endif
				/*
				 * Stop/reset/re-init NIC
				 */
				ed_reset(ifp);
			} else {
				/*
				 * Receiver Error.  One or more of: CRC
				 * error, frame alignment error, FIFO
				 * overrun, or missed packet.
				 */
				if (isr & ED_ISR_RXE) {
					u_char rsr;

					rsr = ed_nic_inb(sc, ED_P0_RSR);
					if (rsr & ED_RSR_CRC)
						sc->mibdata.dot3StatsFCSErrors++;
					if (rsr & ED_RSR_FAE)
						sc->mibdata.dot3StatsAlignmentErrors++;
					if (rsr & ED_RSR_FO)
						sc->mibdata.dot3StatsInternalMacReceiveErrors++;
					if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#ifdef ED_DEBUG
					if_printf(ifp, "receive error %x\n",
					    ed_nic_inb(sc, ED_P0_RSR));
#endif
				}

				/*
				 * Go get the packet(s)
				 * XXX - Doing this on an error is dubious
				 * because there shouldn't be any data to
				 * get (we've configured the interface to
				 * not accept packets with errors).
				 */

				/*
				 * Enable 16bit access to shared memory
				 * first on WD/SMC boards.
				 */
				ed_enable_16bit_access(sc);
				ed_rint(sc);
				ed_disable_16bit_access(sc);
			}
		}

		/*
		 * If it looks like the transmitter can take more data,
		 * attempt to start output on the interface.  This is done
		 * after handling the receiver to give the receiver priority.
		 */
		if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
			ed_start_locked(ifp);

		/*
		 * return NIC CR to standard state: page 0, remote DMA
		 * complete, start (toggling the TXP bit off, even if it was
		 * just set in the transmit routine, is *okay* - it is
		 * 'edge' triggered from low to high)
		 */
		ed_nic_barrier(sc, ED_P0_CR, 1,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA);
		ed_nic_barrier(sc, ED_P0_CR, 1,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

		/*
		 * If the Network Tally Counters overflow, read them to
		 * reset them.  It appears that old 8390's won't clear the
		 * ISR flag otherwise - resulting in an infinite loop.
		 */
		if (isr & ED_ISR_CNT) {
			(void) ed_nic_inb(sc, ED_P0_CNTR0);
			(void) ed_nic_inb(sc, ED_P0_CNTR1);
			(void) ed_nic_inb(sc, ED_P0_CNTR2);
		}
	}
	ED_UNLOCK(sc);
}
static void
kr_tx(struct kr_softc *sc)
{
	struct kr_txdesc *txd;
	struct kr_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t ctl, devcs;
	int cons, prod;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_tx_cons;
	prod = sc->kr_cdata.kr_tx_prod;

	if (cons == prod)
		return;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->kr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; KR_INC(cons, KR_TX_RING_CNT)) {
		cur_tx = &sc->kr_rdata.kr_tx_ring[cons];
		ctl = cur_tx->kr_ctl;
		devcs = cur_tx->kr_devcs;
		/* Check if descriptor has "finished" flag */
		if ((ctl & KR_CTL_F) == 0)
			break;

		sc->kr_cdata.kr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->kr_cdata.kr_txdesc[cons];

		if (devcs & KR_DMATX_DEVCS_TOK)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		else {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* collisions: medium busy, late collision */
			if ((devcs & KR_DMATX_DEVCS_EC) ||
			    (devcs & KR_DMATX_DEVCS_LC))
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
		}

		bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);

		/* Free only if it's first descriptor in list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->kr_ctl = KR_CTL_IOF;
		cur_tx->kr_devcs = 0;
		cur_tx->kr_ca = 0;
		cur_tx->kr_link = 0;
	}

	sc->kr_cdata.kr_tx_cons = cons;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, BUS_DMASYNC_PREWRITE);
}
/*
 * Handle the given receive status queue entry
 */
void
patm_rx(struct patm_softc *sc, struct idt_rsqe *rsqe)
{
	struct mbuf *m;
	void *buf;
	u_int stat, cid, w, cells, len, h;
	struct patm_vcc *vcc;
	struct atm_pseudohdr aph;
	u_char *trail;

	cid = le32toh(rsqe->cid);
	stat = le32toh(rsqe->stat);
	h = le32toh(rsqe->handle);

	cid = PATM_CID(sc, IDT_RSQE_VPI(cid), IDT_RSQE_VCI(cid));
	vcc = sc->vccs[cid];

	if (IDT_RSQE_TYPE(stat) == IDT_RSQE_IDLE) {
		/* connection has gone idle */
		if (stat & IDT_RSQE_BUF)
			patm_rcv_free(sc, patm_rcv_handle(sc, h), h);

		w = rct_read(sc, cid, 0);
		if (w != 0 && !(w & IDT_RCT_OPEN))
			rct_write(sc, cid, 0, 0);
		if (vcc != NULL && (vcc->vflags & PATM_VCC_RX_CLOSING)) {
			patm_debug(sc, VCC, "%u.%u RX closed",
			    vcc->vcc.vpi, vcc->vcc.vci);
			vcc->vflags &= ~PATM_VCC_RX_CLOSING;
			if (vcc->vcc.flags & ATMIO_FLAG_ASYNC) {
				patm_rx_vcc_closed(sc, vcc);
				if (!(vcc->vflags & PATM_VCC_OPEN))
					patm_vcc_closed(sc, vcc);
			} else
				cv_signal(&sc->vcc_cv);
		}
		return;
	}

	buf = patm_rcv_handle(sc, h);

	if (vcc == NULL || (vcc->vflags & PATM_VCC_RX_OPEN) == 0) {
		patm_rcv_free(sc, buf, h);
		return;
	}

	cells = IDT_RSQE_CNT(stat);
	KASSERT(cells > 0, ("zero cell count"));

	if (vcc->vcc.aal == ATMIO_AAL_0) {
		/* deliver this packet as it is */
		if ((m = patm_rcv_mbuf(sc, buf, h, 1)) == NULL)
			return;

		m->m_len = cells * 48;
		m->m_pkthdr.len = m->m_len;
		m->m_pkthdr.rcvif = sc->ifp;

	} else if (vcc->vcc.aal == ATMIO_AAL_34) {
		/* XXX AAL3/4 */
		patm_rcv_free(sc, buf, h);
		return;

	} else if (vcc->vcc.aal == ATMIO_AAL_5) {
		if (stat & IDT_RSQE_CRC) {
			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			if (vcc->chain != NULL) {
				m_freem(vcc->chain);
				vcc->chain = vcc->last = NULL;
			}
			return;
		}

		/* append to current chain */
		if (vcc->chain == NULL) {
			if ((m = patm_rcv_mbuf(sc, buf, h, 1)) == NULL)
				return;
			m->m_len = cells * 48;
			m->m_pkthdr.len = m->m_len;
			m->m_pkthdr.rcvif = sc->ifp;
			vcc->chain = vcc->last = m;
		} else {
			if ((m = patm_rcv_mbuf(sc, buf, h, 0)) == NULL)
				return;
			m->m_len = cells * 48;
			vcc->last->m_next = m;
			vcc->last = m;
			vcc->chain->m_pkthdr.len += m->m_len;
		}

		if (!(stat & IDT_RSQE_EPDU))
			return;

		trail = mtod(m, u_char *) + m->m_len - 6;
		len = (trail[0] << 8) + trail[1];

		if ((u_int)vcc->chain->m_pkthdr.len < len + 8) {
			patm_printf(sc, "%s: bad aal5 lengths %u %u\n",
			    __func__, (u_int)m->m_pkthdr.len, len);
			m_freem(vcc->chain);
			vcc->chain = vcc->last = NULL;
			return;
		}
		m->m_len -= vcc->chain->m_pkthdr.len - len;
		KASSERT(m->m_len >= 0, ("bad last mbuf"));

		m = vcc->chain;
		vcc->chain = vcc->last = NULL;
		m->m_pkthdr.len = len;
	} else
static void
kr_rx(struct kr_softc *sc)
{
	struct kr_rxdesc *rxd;
	struct ifnet *ifp = sc->kr_ifp;
	int cons, prog, packet_len, count, error;
	struct kr_desc *cur_rx;
	struct mbuf *m;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_rx_cons;

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < KR_RX_RING_CNT; KR_INC(cons, KR_RX_RING_CNT)) {
		cur_rx = &sc->kr_rdata.kr_rx_ring[cons];
		rxd = &sc->kr_cdata.kr_rxdesc[cons];
		m = rxd->rx_m;

		if ((cur_rx->kr_ctl & KR_CTL_D) == 0)
			break;
		prog++;

		packet_len = KR_PKTSIZE(cur_rx->kr_devcs);
		count = m->m_len - KR_DMASIZE(cur_rx->kr_ctl);
		/* Assume it's error */
		error = 1;

		if (packet_len != count)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if (count < 64)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_LD) == 0)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_ROK) != 0) {
			error = 0;
			bus_dmamap_sync(sc->kr_cdata.kr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_PREREAD);
			m = rxd->rx_m;
			kr_fixup_rx(m);
			m->m_pkthdr.rcvif = ifp;
			/* Skip 4 bytes of CRC */
			m->m_pkthdr.len = m->m_len =
			    packet_len - ETHER_CRC_LEN;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			KR_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			KR_LOCK(sc);
		}

		if (error) {
			/* Restore CONTROL and CA values, reset DEVCS */
			cur_rx->kr_ctl = rxd->saved_ctl;
			cur_rx->kr_ca = rxd->saved_ca;
			cur_rx->kr_devcs = 0;
		} else {
			/* Reinit descriptor */
			cur_rx->kr_ctl = KR_CTL_IOD;
			if (cons == KR_RX_RING_CNT - 1)
				cur_rx->kr_ctl |= KR_CTL_COD;
			cur_rx->kr_devcs = 0;
			cur_rx->kr_ca = 0;
			if (kr_newbuf(sc, cons) != 0) {
				device_printf(sc->kr_dev,
				    "Failed to allocate buffer\n");
				break;
			}
		}

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	if (prog > 0) {
		sc->kr_cdata.kr_rx_cons = cons;

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
/*
 * One helper hook function is used by all hook points.
 * + from hhook_type we can determine the packet direction:
 *   HHOOK_TYPE_IPSEC_IN or HHOOK_TYPE_IPSEC_OUT;
 * + from hhook_id we can determine address family: AF_INET or AF_INET6;
 * + udata contains pointer to enc_softc;
 * + ctx_data contains pointer to struct ipsec_ctx_data.
 */
static int
enc_hhook(int32_t hhook_type, int32_t hhook_id, void *udata, void *ctx_data,
    void *hdata, struct osd *hosd)
{
	struct enchdr hdr;
	struct ipsec_ctx_data *ctx;
	struct enc_softc *sc;
	struct ifnet *ifp, *rcvif;
	struct pfil_head *ph;
	int pdir;

	sc = (struct enc_softc *)udata;
	ifp = sc->sc_ifp;
	if ((ifp->if_flags & IFF_UP) == 0)
		return (0);

	ctx = (struct ipsec_ctx_data *)ctx_data;
	/* XXX: wrong hook point was used by caller? */
	if (ctx->af != hhook_id)
		return (EPFNOSUPPORT);

	if (((hhook_type == HHOOK_TYPE_IPSEC_IN &&
	    (ctx->enc & V_bpf_mask_in) != 0) ||
	    (hhook_type == HHOOK_TYPE_IPSEC_OUT &&
	    (ctx->enc & V_bpf_mask_out) != 0)) &&
	    bpf_peers_present(ifp->if_bpf) != 0) {
		hdr.af = ctx->af;
		hdr.spi = ctx->sav->spi;
		hdr.flags = 0;
		if (ctx->sav->alg_enc != SADB_EALG_NONE)
			hdr.flags |= M_CONF;
		if (ctx->sav->alg_auth != SADB_AALG_NONE)
			hdr.flags |= M_AUTH;
		bpf_mtap2(ifp->if_bpf, &hdr, sizeof(hdr), *ctx->mp);
	}

	switch (hhook_type) {
	case HHOOK_TYPE_IPSEC_IN:
		if (ctx->enc == IPSEC_ENC_BEFORE) {
			/* Do accounting only once */
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			if_inc_counter(ifp, IFCOUNTER_IBYTES,
			    (*ctx->mp)->m_pkthdr.len);
		}
		if ((ctx->enc & V_filter_mask_in) == 0)
			return (0);	/* skip pfil processing */
		pdir = PFIL_IN;
		break;
	case HHOOK_TYPE_IPSEC_OUT:
		if (ctx->enc == IPSEC_ENC_BEFORE) {
			/* Do accounting only once */
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			if_inc_counter(ifp, IFCOUNTER_OBYTES,
			    (*ctx->mp)->m_pkthdr.len);
		}
		if ((ctx->enc & V_filter_mask_out) == 0)
			return (0);	/* skip pfil processing */
		pdir = PFIL_OUT;
		break;
	default:
		return (EINVAL);
	}

	switch (hhook_id) {
#ifdef INET
	case AF_INET:
		ph = &V_inet_pfil_hook;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ph = &V_inet6_pfil_hook;
		break;
#endif
	default:
		ph = NULL;
	}
	if (ph == NULL || !PFIL_HOOKED(ph))
		return (0);
	/* Make the packet look like it was received on enc(4) */
	rcvif = (*ctx->mp)->m_pkthdr.rcvif;
	(*ctx->mp)->m_pkthdr.rcvif = ifp;
	if (pfil_run_hooks(ph, ctx->mp, ifp, pdir, NULL) != 0 ||
	    *ctx->mp == NULL) {
		*ctx->mp = NULL;	/* consumed by filter */
		return (EACCES);
	}
	(*ctx->mp)->m_pkthdr.rcvif = rcvif;
	return (0);
}
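/*
 * Hedged sketch of the caller side, not from the original source: the
 * IPsec input/output paths package the mbuf, SA and processing stage into
 * an ipsec_ctx_data and run the hook point, which dispatches into
 * enc_hhook() above.  The helper names used here (IPSEC_INIT_CTX,
 * ipsec_run_hhooks) are assumptions about the surrounding IPsec code,
 * shown only to illustrate the contract enc_hhook() implements.
 */
static int
enc_run_in_hook_sketch(struct mbuf **mp, struct secasvar *sav)
{
	struct ipsec_ctx_data ctx;

	/*
	 * AF_INET plus IPSEC_ENC_BEFORE selects the IPv4 "before
	 * decapsulation" input stage handled by enc_hhook().
	 */
	IPSEC_INIT_CTX(&ctx, mp, sav, AF_INET, IPSEC_ENC_BEFORE);
	return (ipsec_run_hhooks(&ctx, HHOOK_TYPE_IPSEC_IN));
}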
/*
 * Ethernet output routine.
 * Encapsulate a packet of type family for the local net.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 */
int
ether_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro)
{
	short type;
	int error = 0, hdrcmplt = 0;
	u_char edst[ETHER_ADDR_LEN];
	struct llentry *lle = NULL;
	struct rtentry *rt0 = NULL;
	struct ether_header *eh;
	struct pf_mtag *t;
	int loop_copy = 1;
	int hlen;	/* link layer header length */
	int is_gw = 0;
	uint32_t pflags = 0;

	if (ro != NULL) {
		if (!(m->m_flags & (M_BCAST | M_MCAST))) {
			lle = ro->ro_lle;
			if (lle != NULL)
				pflags = lle->la_flags;
		}
		rt0 = ro->ro_rt;
		if (rt0 != NULL && (rt0->rt_flags & RTF_GATEWAY) != 0)
			is_gw = 1;
	}
#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error)
		senderr(error);
#endif

	M_PROFILE(m);
	if (ifp->if_flags & IFF_MONITOR)
		senderr(ENETDOWN);
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
		senderr(ENETDOWN);

	hlen = ETHER_HDR_LEN;
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (lle != NULL && (pflags & LLE_VALID) != 0)
			memcpy(edst, &lle->ll_addr.mac16, sizeof(edst));
		else
			error = arpresolve(ifp, is_gw, m, dst, edst, &pflags);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IP);
		break;
	case AF_ARP:
	{
		struct arphdr *ah;

		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_ETHER);

		loop_copy = 0; /* if this is for us, don't do it */

		switch (ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			type = htons(ETHERTYPE_REVARP);
			break;
		case ARPOP_REQUEST:
		case ARPOP_REPLY:
		default:
			type = htons(ETHERTYPE_ARP);
			break;
		}

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, edst, ETHER_ADDR_LEN);
		else
			bcopy(ar_tha(ah), edst, ETHER_ADDR_LEN);
	}
	break;
#endif
#ifdef INET6
	case AF_INET6:
		if (lle != NULL && (pflags & LLE_VALID))
			memcpy(edst, &lle->ll_addr.mac16, sizeof(edst));
		else
			error = nd6_resolve(ifp, is_gw, m, dst,
			    (u_char *)edst, &pflags);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IPV6);
		break;
#endif
	case pseudo_AF_HDRCMPLT:
	    {
		const struct ether_header *eh;

		hdrcmplt = 1;
		/* FALLTHROUGH */

	case AF_UNSPEC:
		loop_copy = 0; /* if this is for us, don't do it */
		eh = (const struct ether_header *)dst->sa_data;
		(void)memcpy(edst, eh->ether_dhost, sizeof (edst));
		type = eh->ether_type;
		break;
	    }
	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		senderr(EAFNOSUPPORT);
	}

	if ((pflags & LLE_IFADDR) != 0) {
		update_mbuf_csumflags(m, m);
		return (if_simloop(ifp, m, dst->sa_family, 0));
	}

	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 */
	M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
	if (m == NULL)
		senderr(ENOBUFS);
	eh = mtod(m, struct ether_header *);
	if (hdrcmplt == 0) {
		memcpy(&eh->ether_type, &type, sizeof(eh->ether_type));
		memcpy(eh->ether_dhost, edst, sizeof (edst));
		memcpy(eh->ether_shost, IF_LLADDR(ifp),
		    sizeof(eh->ether_shost));
	}

	/*
	 * If a simplex interface, and the packet is being sent to our
	 * Ethernet address or a broadcast address, loopback a copy.
	 * XXX To make a simplex device behave exactly like a duplex
	 * device, we should copy in the case of sending to our own
	 * ethernet address (thus letting the original actually appear
	 * on the wire).  However, we don't do that here for security
	 * reasons and compatibility with the original behavior.
	 */
	if ((ifp->if_flags & IFF_SIMPLEX) && loop_copy &&
	    ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
		if (m->m_flags & M_BCAST) {
			struct mbuf *n;

			/*
			 * Because if_simloop() modifies the packet, we need a
			 * writable copy through m_dup() instead of a readonly
			 * one as m_copy[m] would give us.  The alternative
			 * would be to modify if_simloop() to handle the
			 * readonly mbuf, but performancewise it is mostly
			 * equivalent (trading extra data copying vs. extra
			 * locking).
			 *
			 * XXX This is a local workaround.  A number of less
			 * often used kernel parts suffer from the same bug.
			 * See PR kern/105943 for a proposed general solution.
			 */
			if ((n = m_dup(m, M_NOWAIT)) != NULL) {
				update_mbuf_csumflags(m, n);
				(void)if_simloop(ifp, n, dst->sa_family,
				    hlen);
			} else
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		} else if (bcmp(eh->ether_dhost, eh->ether_shost,
		    ETHER_ADDR_LEN) == 0) {
			update_mbuf_csumflags(m, m);
			(void) if_simloop(ifp, m, dst->sa_family, hlen);
			return (0);	/* XXX */
		}
	}

	/*
	 * Bridges require special output handling.
	 */
	if (ifp->if_bridge) {
		BRIDGE_OUTPUT(ifp, m, error);
		return (error);
	}

#if defined(INET) || defined(INET6)
	if (ifp->if_carp &&
	    (error = (*carp_output_p)(ifp, m, dst)))
		goto bad;
#endif

	/* Handle ng_ether(4) processing, if any */
	if (ifp->if_l2com != NULL) {
		KASSERT(ng_ether_output_p != NULL,
		    ("ng_ether_output_p is NULL"));
		if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
bad:			if (m != NULL)
				m_freem(m);
			return (error);
		}
		if (m == NULL)
			return (0);
	}

	/* Continue with link-layer output */
	return ether_output_frame(ifp, m);
}
/*
 * Process a received Ethernet packet; the packet is in the
 * mbuf chain m with the ethernet header at the front.
 */
static void
ether_input_internal(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh;
	u_short etype;

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}
#ifdef DIAGNOSTIC
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
		m_freem(m);
		return;
	}
#endif
	if (m->m_len < ETHER_HDR_LEN) {
		/* XXX maybe should pullup? */
		if_printf(ifp, "discard frame w/o leading ethernet "
		    "header (len %u pkt len %u)\n",
		    m->m_len, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		m_freem(m);
		return;
	}
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);
	random_harvest_queue(m, sizeof(*m), 2, RANDOM_NET_ETHER);

	CURVNET_SET_QUIET(ifp->if_vnet);

	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
		if (ETHER_IS_BROADCAST(eh->ether_dhost))
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
	}

#ifdef MAC
	/*
	 * Tag the mbuf with an appropriate MAC label before any other
	 * consumers can get to it.
	 */
	mac_ifnet_create_mbuf(ifp, m);
#endif

	/*
	 * Give bpf a chance at the packet.
	 */
	ETHER_BPF_MTAP(ifp, m);

	/*
	 * If the CRC is still on the packet, trim it off.  We do this once
	 * and once only in case we are re-entered.  Nothing else on the
	 * Ethernet receive path expects to see the FCS.
	 */
	if (m->m_flags & M_HASFCS) {
		m_adj(m, -ETHER_CRC_LEN);
		m->m_flags &= ~M_HASFCS;
	}

	if (!(ifp->if_capenable & IFCAP_HWSTATS))
		if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);

	/* Allow monitor mode to claim this frame, after stats are updated. */
	if (ifp->if_flags & IFF_MONITOR) {
		m_freem(m);
		CURVNET_RESTORE();
		return;
	}

	/* Handle input from a lagg(4) port */
	if (ifp->if_type == IFT_IEEE8023ADLAG) {
		KASSERT(lagg_input_p != NULL,
		    ("%s: if_lagg not loaded!", __func__));
		m = (*lagg_input_p)(ifp, m);
		if (m != NULL)
			ifp = m->m_pkthdr.rcvif;
		else {
			CURVNET_RESTORE();
			return;
		}
	}

	/*
	 * If the hardware did not process an 802.1Q tag, do this now,
	 * to allow 802.1P priority frames to be passed to the main input
	 * path correctly.
	 * TODO: Deal with Q-in-Q frames, but not arbitrary nesting levels.
	 */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_VLAN) {
		struct ether_vlan_header *evl;

		if (m->m_len < sizeof(*evl) &&
		    (m = m_pullup(m, sizeof(*evl))) == NULL) {
#ifdef DIAGNOSTIC
			if_printf(ifp, "cannot pullup VLAN header\n");
#endif
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			CURVNET_RESTORE();
			return;
		}

		evl = mtod(m, struct ether_vlan_header *);
		m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
		m->m_flags |= M_VLANTAG;

		bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
		    ETHER_HDR_LEN - ETHER_TYPE_LEN);
		m_adj(m, ETHER_VLAN_ENCAP_LEN);
		eh = mtod(m, struct ether_header *);
	}

	M_SETFIB(m, ifp->if_fib);

	/* Allow ng_ether(4) to claim this frame. */
	if (ifp->if_l2com != NULL) {
		KASSERT(ng_ether_input_p != NULL,
		    ("%s: ng_ether_input_p is NULL", __func__));
		m->m_flags &= ~M_PROMISC;
		(*ng_ether_input_p)(ifp, &m);
		if (m == NULL) {
			CURVNET_RESTORE();
			return;
		}
		eh = mtod(m, struct ether_header *);
	}
/*
 * Ethernet interface receiver interrupt.
 */
static __inline void
ed_rint(struct ed_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	u_char boundry;
	u_short len;
	struct ed_ring packet_hdr;
	bus_size_t packet_ptr;

	ED_ASSERT_LOCKED(sc);

	/*
	 * Set NIC to page 1 registers to get 'current' pointer
	 */
	ed_nic_barrier(sc, ED_P0_CR, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STA);
	ed_nic_barrier(sc, ED_P0_CR, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * 'sc->next_packet' is the logical beginning of the ring-buffer -
	 * i.e. it points to where new data has been buffered.  The 'CURR'
	 * (current) register points to the logical end of the ring-buffer -
	 * i.e. it points to where additional new data will be added.  We
	 * loop here until the logical beginning equals the logical end (or
	 * in other words, until the ring-buffer is empty).
	 */
	while (sc->next_packet != ed_nic_inb(sc, ED_P1_CURR)) {
		/* get pointer to this buffer's header structure */
		packet_ptr = sc->mem_ring +
		    (sc->next_packet - sc->rec_page_start) * ED_PAGE_SIZE;

		/*
		 * The byte count includes a 4 byte header that was added by
		 * the NIC.
		 */
		sc->readmem(sc, packet_ptr, (char *)&packet_hdr,
		    sizeof(packet_hdr));
		len = packet_hdr.count;
		if (len > (ETHER_MAX_LEN - ETHER_CRC_LEN +
		    sizeof(struct ed_ring)) ||
		    len < (ETHER_MIN_LEN - ETHER_CRC_LEN +
		    sizeof(struct ed_ring))) {
			/*
			 * Length is a wild value.  There's a good chance
			 * that this was caused by the NIC being old and
			 * buggy.  The bug is that the length low byte is
			 * duplicated in the high byte.  Try to recalculate
			 * the length based on the pointer to the next
			 * packet.  Also, need to preserve offset into page.
			 *
			 * NOTE: sc->next_packet is pointing at the current
			 * packet.
			 */
			len &= ED_PAGE_SIZE - 1;
			if (packet_hdr.next_packet >= sc->next_packet)
				len += (packet_hdr.next_packet -
				    sc->next_packet) * ED_PAGE_SIZE;
			else
				len += ((packet_hdr.next_packet -
				    sc->rec_page_start) +
				    (sc->rec_page_stop - sc->next_packet)) *
				    ED_PAGE_SIZE;
			/*
			 * because buffers are aligned on 256-byte boundary,
			 * the length computed above is off by 256 in almost
			 * all cases.  Fix it...
			 */
			if (len & 0xff)
				len -= 256;
			if (len > (ETHER_MAX_LEN - ETHER_CRC_LEN +
			    sizeof(struct ed_ring)))
				sc->mibdata.dot3StatsFrameTooLongs++;
		}

		/*
		 * Be fairly liberal about what we allow as a "reasonable"
		 * length so that a [crufty] packet will make it to BPF (and
		 * can thus be analyzed).  Note that all that is really
		 * important is that we have a length that will fit into one
		 * mbuf cluster or less; the upper layer protocols can then
		 * figure out the length from their own length field(s).
		 * But make sure that we have at least a full ethernet
		 * header or we would be unable to call ether_input() later.
		 */
		if ((len >= sizeof(struct ed_ring) + ETHER_HDR_LEN) &&
		    (len <= MCLBYTES) &&
		    (packet_hdr.next_packet >= sc->rec_page_start) &&
		    (packet_hdr.next_packet < sc->rec_page_stop)) {
			/*
			 * Go get packet.
			 */
			ed_get_packet(sc,
			    packet_ptr + sizeof(struct ed_ring),
			    len - sizeof(struct ed_ring));
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		} else {
			/*
			 * Really BAD.  The ring pointers are corrupted.
			 */
			log(LOG_ERR,
			    "%s: NIC memory corrupt - invalid packet length %d\n",
			    ifp->if_xname, len);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			ed_reset(ifp);
			return;
		}

		/*
		 * Update next packet pointer
		 */
		sc->next_packet = packet_hdr.next_packet;

		/*
		 * Update NIC boundary pointer - being careful to keep it
		 * one buffer behind (as recommended by NS databook).
		 */
		boundry = sc->next_packet - 1;
		if (boundry < sc->rec_page_start)
			boundry = sc->rec_page_stop - 1;

		/*
		 * Set NIC to page 0 registers to update boundary register
		 */
		ed_nic_barrier(sc, ED_P0_CR, 1,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA);
		ed_nic_barrier(sc, ED_P0_CR, 1,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		ed_nic_outb(sc, ED_P0_BNRY, boundry);

		/*
		 * Set NIC to page 1 registers before looping to top
		 * (prepare to get 'CURR' current pointer)
		 */
		ed_nic_barrier(sc, ED_P0_CR, 1,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 |
		    ED_CR_STA);
		ed_nic_barrier(sc, ED_P0_CR, 1,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	}
}
static inline void
am79900_rint(struct lance_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	struct lermd rmd;
	uint32_t rmd1;
	int bix, rp;
#if defined(__i386__)
	struct ether_header *eh;
#endif

	bix = sc->sc_last_rd;

	/* Process all buffers with valid data. */
	for (;;) {
		rp = LE_RMDADDR(sc, bix);
		(*sc->sc_copyfromdesc)(sc, &rmd, rp, sizeof(rmd));

		rmd1 = LE_LE32TOH(rmd.rmd1);
		if (rmd1 & LE_R1_OWN)
			break;

		m = NULL;
		if ((rmd1 & (LE_R1_ERR | LE_R1_STP | LE_R1_ENP)) !=
		    (LE_R1_STP | LE_R1_ENP)) {
			if (rmd1 & LE_R1_ERR) {
#ifdef LEDEBUG
				if (rmd1 & LE_R1_ENP) {
					if ((rmd1 & LE_R1_OFLO) == 0) {
						if (rmd1 & LE_R1_FRAM)
							if_printf(ifp,
							    "framing error\n");
						if (rmd1 & LE_R1_CRC)
							if_printf(ifp,
							    "crc mismatch\n");
					}
				} else if (rmd1 & LE_R1_OFLO)
					if_printf(ifp, "overflow\n");
#endif
				if (rmd1 & LE_R1_BUFF)
					if_printf(ifp,
					    "receive buffer error\n");
			} else if ((rmd1 & (LE_R1_STP | LE_R1_ENP)) !=
			    (LE_R1_STP | LE_R1_ENP))
				if_printf(ifp, "dropping chained buffer\n");
		} else {
#ifdef LEDEBUG
			if (sc->sc_flags & LE_DEBUG)
				am79900_recv_print(sc, bix);
#endif
			/* Pull the packet off the interface. */
			m = lance_get(sc, LE_RBUFADDR(sc, bix),
			    (LE_LE32TOH(rmd.rmd2) & 0xfff) - ETHER_CRC_LEN);
		}

		rmd.rmd1 = LE_HTOLE32(LE_R1_OWN | LE_R1_ONES |
		    (-LEBLEN & 0xfff));
		rmd.rmd2 = 0;
		rmd.rmd3 = 0;
		(*sc->sc_copytodesc)(sc, &rmd, rp, sizeof(rmd));

		if (++bix == sc->sc_nrbuf)
			bix = 0;

		if (m != NULL) {
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

#if defined(__i386__)
			/*
			 * The VMware LANCE does not present IFF_SIMPLEX
			 * behavior on multicast packets.  Thus drop the
			 * packet if it is from ourselves.
			 */
			eh = mtod(m, struct ether_header *);
			if (!ether_cmp(eh->ether_shost, sc->sc_enaddr)) {
				m_freem(m);
				continue;
			}
#endif

			/* Pass the packet up. */
			LE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			LE_LOCK(sc);
		} else