/*
 * Receive interrupt handler for the 16-bit Am7990 LANCE: drain every
 * receive descriptor the chip has handed back to the host, pass good
 * frames up the stack, and return each descriptor to the chip.
 *
 * NOTE(review): this block appears truncated in this chunk — it ends at
 * a dangling "} else"; the remainder of the function is not visible here.
 */
static void
am7990_rint(struct lance_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	struct lermd rmd;
	int bix, rp;
#if defined(LANCE_REVC_BUG)
	struct ether_header *eh;
	/* Make sure this is short-aligned, for ether_cmp(). */
	static uint16_t bcast_enaddr[3] = { ~0, ~0, ~0 };
#endif

	/* Resume scanning where the previous interrupt left off. */
	bix = sc->sc_last_rd;

	/* Process all buffers with valid data. */
	for (;;) {
		rp = LE_RMDADDR(sc, bix);
		(*sc->sc_copyfromdesc)(sc, &rmd, rp, sizeof(rmd));

		/* OWN still set means the chip has not filled this one yet. */
		if (rmd.rmd1_bits & LE_R1_OWN)
			break;

		m = NULL;
		/*
		 * A good frame has STP and ENP both set (whole packet in
		 * one buffer) and no error bit; anything else is logged
		 * and dropped.
		 */
		if ((rmd.rmd1_bits & (LE_R1_ERR | LE_R1_STP | LE_R1_ENP)) !=
		    (LE_R1_STP | LE_R1_ENP)) {
			if (rmd.rmd1_bits & LE_R1_ERR) {
#ifdef LEDEBUG
				if (rmd.rmd1_bits & LE_R1_ENP) {
					/*
					 * FRAM/CRC are only meaningful when
					 * no overflow occurred.
					 */
					if ((rmd.rmd1_bits & LE_R1_OFLO) == 0) {
						if (rmd.rmd1_bits & LE_R1_FRAM)
							if_printf(ifp,
							    "framing error\n");
						if (rmd.rmd1_bits & LE_R1_CRC)
							if_printf(ifp,
							    "crc mismatch\n");
					}
				} else if (rmd.rmd1_bits & LE_R1_OFLO)
					if_printf(ifp, "overflow\n");
#endif
				if (rmd.rmd1_bits & LE_R1_BUFF)
					if_printf(ifp,
					    "receive buffer error\n");
			} else if ((rmd.rmd1_bits & (LE_R1_STP | LE_R1_ENP)) !=
			    (LE_R1_STP | LE_R1_ENP))
				/* Frame spans descriptors; not supported. */
				if_printf(ifp, "dropping chained buffer\n");
		} else {
#ifdef LEDEBUG
			if (sc->sc_flags & LE_DEBUG)
				am7990_recv_print(sc, bix);
#endif
			/* Pull the packet off the interface. */
			m = lance_get(sc, LE_RBUFADDR(sc, bix),
			    (int)rmd.rmd3 - ETHER_CRC_LEN);
		}

		/* Give the descriptor (and its full buffer) back to the chip. */
		rmd.rmd1_bits = LE_R1_OWN;
		rmd.rmd2 = -LEBLEN | LE_XMD2_ONES;
		rmd.rmd3 = 0;
		(*sc->sc_copytodesc)(sc, &rmd, rp, sizeof(rmd));

		/* Advance around the ring. */
		if (++bix == sc->sc_nrbuf)
			bix = 0;

		if (m != NULL) {
			ifp->if_ipackets++;

#ifdef LANCE_REVC_BUG
			/*
			 * The old LANCE (Rev. C) chips have a bug which
			 * causes garbage to be inserted in front of the
			 * received packet. The workaround is to ignore
			 * packets with an invalid destination address
			 * (garbage will usually not match).
			 * Of course, this precludes multicast support...
			 */
			eh = mtod(m, struct ether_header *);
			if (ether_cmp(eh->ether_dhost, sc->sc_enaddr) &&
			    ether_cmp(eh->ether_dhost, bcast_enaddr)) {
				m_freem(m);
				continue;
			}
#endif

			/* Pass the packet up; drop the lock across if_input. */
			LE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			LE_LOCK(sc);
		} else
/*
 * Receive interrupt handler for the 32-bit Am79900-style (ILACC/PCnet)
 * descriptor format: same structure as am7990_rint(), but descriptor
 * words are 32-bit little-endian and the length lives in rmd2.
 *
 * NOTE(review): this block appears truncated in this chunk — it ends at
 * a dangling "} else"; the remainder of the function is not visible here.
 */
static inline void
am79900_rint(struct lance_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	struct lermd rmd;
	uint32_t rmd1;
	int bix, rp;
#if defined(__i386__) && !defined(PC98)
	struct ether_header *eh;
#endif

	/* Resume scanning where the previous interrupt left off. */
	bix = sc->sc_last_rd;

	/* Process all buffers with valid data. */
	for (;;) {
		rp = LE_RMDADDR(sc, bix);
		(*sc->sc_copyfromdesc)(sc, &rmd, rp, sizeof(rmd));
		/* Descriptor words are little-endian on the wire. */
		rmd1 = LE_LE32TOH(rmd.rmd1);

		/* OWN still set means the chip has not filled this one yet. */
		if (rmd1 & LE_R1_OWN)
			break;

		m = NULL;
		/*
		 * A good frame has STP and ENP both set (whole packet in
		 * one buffer) and no error bit; anything else is logged
		 * and dropped.
		 */
		if ((rmd1 & (LE_R1_ERR | LE_R1_STP | LE_R1_ENP)) !=
		    (LE_R1_STP | LE_R1_ENP)){
			if (rmd1 & LE_R1_ERR) {
#ifdef LEDEBUG
				if (rmd1 & LE_R1_ENP) {
					/*
					 * FRAM/CRC are only meaningful when
					 * no overflow occurred.
					 */
					if ((rmd1 & LE_R1_OFLO) == 0) {
						if (rmd1 & LE_R1_FRAM)
							if_printf(ifp,
							    "framing error\n");
						if (rmd1 & LE_R1_CRC)
							if_printf(ifp,
							    "crc mismatch\n");
					}
				} else if (rmd1 & LE_R1_OFLO)
					if_printf(ifp, "overflow\n");
#endif
				if (rmd1 & LE_R1_BUFF)
					if_printf(ifp,
					    "receive buffer error\n");
			} else if ((rmd1 & (LE_R1_STP | LE_R1_ENP)) !=
			    (LE_R1_STP | LE_R1_ENP))
				/* Frame spans descriptors; not supported. */
				if_printf(ifp, "dropping chained buffer\n");
		} else {
#ifdef LEDEBUG
			if (sc->sc_flags & LE_DEBUG)
				am79900_recv_print(sc, bix);
#endif
			/* Pull the packet off the interface. */
			m = lance_get(sc, LE_RBUFADDR(sc, bix),
			    (LE_LE32TOH(rmd.rmd2) & 0xfff) - ETHER_CRC_LEN);
		}

		/* Give the descriptor (and its full buffer) back to the chip. */
		rmd.rmd1 = LE_HTOLE32(LE_R1_OWN | LE_R1_ONES |
		    (-LEBLEN & 0xfff));
		rmd.rmd2 = 0;
		rmd.rmd3 = 0;
		(*sc->sc_copytodesc)(sc, &rmd, rp, sizeof(rmd));

		/* Advance around the ring. */
		if (++bix == sc->sc_nrbuf)
			bix = 0;

		if (m != NULL) {
			ifp->if_ipackets++;

#if defined(__i386__) && !defined(PC98)
			/*
			 * The VMware LANCE does not present IFF_SIMPLEX
			 * behavior on multicast packets. Thus drop the
			 * packet if it is from ourselves.
			 */
			eh = mtod(m, struct ether_header *);
			if (!ether_cmp(eh->ether_shost, sc->sc_enaddr)) {
				m_freem(m);
				continue;
			}
#endif

			/* Pass the packet up; drop the lock across if_input. */
			LE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			LE_LOCK(sc);
		} else
/* * Set up the initialization block and the descriptor rings. */ static void am7990_meminit(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct leinit init; struct lermd rmd; struct letmd tmd; u_long a; int bix; LE_LOCK_ASSERT(sc, MA_OWNED); if (ifp->if_flags & IFF_PROMISC) init.init_mode = LE_MODE_NORMAL | LE_MODE_PROM; else init.init_mode = LE_MODE_NORMAL; init.init_padr[0] = (sc->sc_enaddr[1] << 8) | sc->sc_enaddr[0]; init.init_padr[1] = (sc->sc_enaddr[3] << 8) | sc->sc_enaddr[2]; init.init_padr[2] = (sc->sc_enaddr[5] << 8) | sc->sc_enaddr[4]; lance_setladrf(sc, init.init_ladrf); sc->sc_last_rd = 0; sc->sc_first_td = sc->sc_last_td = sc->sc_no_td = 0; a = sc->sc_addr + LE_RMDADDR(sc, 0); init.init_rdra = a; init.init_rlen = (a >> 16) | ((ffs(sc->sc_nrbuf) - 1) << 13); a = sc->sc_addr + LE_TMDADDR(sc, 0); init.init_tdra = a; init.init_tlen = (a >> 16) | ((ffs(sc->sc_ntbuf) - 1) << 13); (*sc->sc_copytodesc)(sc, &init, LE_INITADDR(sc), sizeof(init)); /* * Set up receive ring descriptors. */ for (bix = 0; bix < sc->sc_nrbuf; bix++) { a = sc->sc_addr + LE_RBUFADDR(sc, bix); rmd.rmd0 = a; rmd.rmd1_hadr = a >> 16; rmd.rmd1_bits = LE_R1_OWN; rmd.rmd2 = -LEBLEN | LE_XMD2_ONES; rmd.rmd3 = 0; (*sc->sc_copytodesc)(sc, &rmd, LE_RMDADDR(sc, bix), sizeof(rmd)); } /* * Set up transmit ring descriptors. */ for (bix = 0; bix < sc->sc_ntbuf; bix++) { a = sc->sc_addr + LE_TBUFADDR(sc, bix); tmd.tmd0 = a; tmd.tmd1_hadr = a >> 16; tmd.tmd1_bits = 0; tmd.tmd2 = LE_XMD2_ONES; tmd.tmd3 = 0; (*sc->sc_copytodesc)(sc, &tmd, LE_TMDADDR(sc, bix), sizeof(tmd)); } }
/* * Set up the initialization block and the descriptor rings. */ static void am79900_meminit(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct leinit init; struct lermd rmd; struct letmd tmd; u_long a; int bix; LE_LOCK_ASSERT(sc, MA_OWNED); if (ifp->if_flags & IFF_PROMISC) init.init_mode = LE_HTOLE32(LE_MODE_NORMAL | LE_MODE_PROM); else init.init_mode = LE_HTOLE32(LE_MODE_NORMAL); init.init_mode |= LE_HTOLE32(((ffs(sc->sc_ntbuf) - 1) << 28) | ((ffs(sc->sc_nrbuf) - 1) << 20)); init.init_padr[0] = LE_HTOLE32(sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24)); init.init_padr[1] = LE_HTOLE32(sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8)); lance_setladrf(sc, init.init_ladrf); sc->sc_last_rd = 0; sc->sc_first_td = sc->sc_last_td = sc->sc_no_td = 0; a = sc->sc_addr + LE_RMDADDR(sc, 0); init.init_rdra = LE_HTOLE32(a); a = sc->sc_addr + LE_TMDADDR(sc, 0); init.init_tdra = LE_HTOLE32(a); (*sc->sc_copytodesc)(sc, &init, LE_INITADDR(sc), sizeof(init)); /* * Set up receive ring descriptors. */ for (bix = 0; bix < sc->sc_nrbuf; bix++) { a = sc->sc_addr + LE_RBUFADDR(sc, bix); rmd.rmd0 = LE_HTOLE32(a); rmd.rmd1 = LE_HTOLE32(LE_R1_OWN | LE_R1_ONES | (-LEBLEN & 0xfff)); rmd.rmd2 = 0; rmd.rmd3 = 0; (*sc->sc_copytodesc)(sc, &rmd, LE_RMDADDR(sc, bix), sizeof(rmd)); } /* * Set up transmit ring descriptors. */ for (bix = 0; bix < sc->sc_ntbuf; bix++) { a = sc->sc_addr + LE_TBUFADDR(sc, bix); tmd.tmd0 = LE_HTOLE32(a); tmd.tmd1 = LE_HTOLE32(LE_T1_ONES); tmd.tmd2 = 0; tmd.tmd3 = 0; (*sc->sc_copytodesc)(sc, &tmd, LE_TMDADDR(sc, bix), sizeof(tmd)); } }