static void
url_setmulti(struct url_softc *sc)
{
	struct ifnet *ifp;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2] = { 0, 0 };
	int h = 0;
	int mcnt = 0;

	DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__));

	if (sc->sc_dying)
		return;

	ifp = GET_IFP(sc);

	if (ifp->if_flags & IFF_PROMISC) {
		URL_SETBIT2(sc, URL_RCR, URL_RCR_AAM|URL_RCR_AAP);
		return;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		URL_SETBIT2(sc, URL_RCR, URL_RCR_AAM);
		URL_CLRBIT2(sc, URL_RCR, URL_RCR_AAP);
		return;
	}

	/* first, zot all the existing hash bits */
	url_csr_write_4(sc, URL_MAR0, 0);
	url_csr_write_4(sc, URL_MAR4, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		h = url_calchash(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	URL_CLRBIT2(sc, URL_RCR, URL_RCR_AAM|URL_RCR_AAP);

	if (mcnt) {
		URL_SETBIT2(sc, URL_RCR, URL_RCR_AM);
	} else {
		URL_CLRBIT2(sc, URL_RCR, URL_RCR_AM);
	}

	url_csr_write_4(sc, URL_MAR0, hashes[0]);
	url_csr_write_4(sc, URL_MAR4, hashes[1]);
}
static void
kue_setmulti(struct kue_softc *sc)
{
	struct ifnet *ifp = GET_IFP(sc);
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	DPRINTFN(5,("%s: %s: enter\n", USBDEVNAME(sc->kue_dev), __func__));

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		sc->kue_rxfilt |= KUE_RXFILT_ALLMULTI;
		sc->kue_rxfilt &= ~KUE_RXFILT_MULTICAST;
		kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt);
		return;
	}

	sc->kue_rxfilt &= ~KUE_RXFILT_ALLMULTI;

	i = 0;
#if defined (__NetBSD__)
	ETHER_FIRST_MULTI(step, &sc->kue_ec, enm);
#else
	ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
#endif
	while (enm != NULL) {
		if (i == KUE_MCFILTCNT(sc) ||
		    memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)
			goto allmulti;

		memcpy(KUE_MCFILT(sc, i), enm->enm_addrlo, ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	sc->kue_rxfilt |= KUE_RXFILT_MULTICAST;
	kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MCAST_FILTERS,
	    i, sc->kue_mcfilters, i * ETHER_ADDR_LEN);

	kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt);
}
/*
 * Set up the logical address filter.
 */
void
bmac_setladrf(struct bmac_softc *sc)
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];
	int x;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter.  The high order bits
	 * select the word, while the rest of the bits select the bit within
	 * the word.
	 */

	if (ifp->if_flags & IFF_PROMISC) {
		bmac_set_bits(sc, RXCFG, RxPromiscEnable);
		return;
	}

	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	if (ifp->if_flags & IFF_ALLMULTI) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		goto chipit;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	bmac_write_reg(sc, HASH0, hash[0]);
	bmac_write_reg(sc, HASH1, hash[1]);
	bmac_write_reg(sc, HASH2, hash[2]);
	bmac_write_reg(sc, HASH3, hash[3]);
	x = bmac_read_reg(sc, RXCFG);
	x &= ~RxPromiscEnable;
	x |= RxHashFilterEnable;
	bmac_write_reg(sc, RXCFG, x);
}
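/*
 * Illustrative sketch (not part of any driver in this collection): a
 * bitwise version of the reflected CRC-32 that ether_crc32_le() is
 * generally understood to compute (polynomial 0xedb88320, all-ones seed,
 * no final inversion), plus the mapping from its top 6 bits to a word/bit
 * pair in a 4 x 16-bit logical address filter, as done in bmac_setladrf()
 * above.  The names sketch_crc32_le() and sketch_hash_slot() are
 * hypothetical and exist only for this example.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t
sketch_crc32_le(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffff;	/* all-ones initial value */
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		/* Reflected update: LSB first, shifting right. */
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;	/* no final XOR, matching how the drivers consume it */
}

/* Map a 6-byte multicast address to (word, bit) in hash[0..3]. */
static void
sketch_hash_slot(const uint8_t addr[6], int *word, int *bitpos)
{
	uint32_t crc = sketch_crc32_le(addr, 6) >> 26;	/* top 6 bits */

	*word = crc >> 4;	/* which 16-bit hash word */
	*bitpos = crc & 0xf;	/* which bit within that word */
}

/*
 * The same top-6-bit mapping appears in qe_mcreset() and both be_mcreset()
 * variants further below; cas_setladrf() keeps 8 bits instead because its
 * filter is 256 bits wide.
 */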
/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	uint8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}
/*
 * Create setup packet and put in queue for sending.
 */
void
ni_setup(struct ni_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ni_msg *msg;
	struct ni_ptdb *ptdb;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, res;

	msg = REMQHI(&fqb->nf_mforw);
	if ((int)msg == Q_EMPTY)
		return; /* What to do? */

	ptdb = (struct ni_ptdb *)&msg->nm_text[0];
	memset(ptdb, 0, sizeof(struct ni_ptdb));

	msg->nm_opcode = BVP_MSG;
	msg->nm_len = 18;
	ptdb->np_index = 2;	/* definition type index */
	ptdb->np_fque = 2;	/* Free queue */
	if (ifp->if_flags & IFF_RUNNING) {
		msg->nm_opcode2 = NI_STPTDB;
		ptdb->np_type = ETHERTYPE_IP;
		ptdb->np_flags = PTDB_UNKN|PTDB_BDC;
		if (ifp->if_flags & IFF_PROMISC)
			ptdb->np_flags |= PTDB_PROMISC;
		memset(ptdb->np_mcast[0], 0xff, ETHER_ADDR_LEN); /* Broadcast */
		ptdb->np_adrlen = 1;
		msg->nm_len += 8;
		ifp->if_flags &= ~IFF_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) == 0) {
			ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
			i = 1;
			while (enm != NULL) {
				if (memcmp(enm->enm_addrlo,
				    enm->enm_addrhi, 6)) {
					ifp->if_flags |= IFF_ALLMULTI;
					ptdb->np_flags |= PTDB_AMC;
					break;
				}
				msg->nm_len += 8;
				ptdb->np_adrlen++;
				memcpy(ptdb->np_mcast[i++], enm->enm_addrlo,
				    ETHER_ADDR_LEN);
				ETHER_NEXT_MULTI(step, enm);
			}
		}
	} else
		msg->nm_opcode2 = NI_CLPTDB;

	res = INSQTI(msg, &gvp->nc_forw0);
	if (res == Q_EMPTY) {
		WAITREG(NI_PCR, PCR_OWN);
		NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	}
}
void
dme_set_addr_filter(struct dme_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec;
	struct ifnet *ifp;
	uint16_t af[4];
	int i;

	ec = &sc->sc_ethercom;
	ifp = &ec->ec_if;

	if (ifp->if_flags & IFF_PROMISC) {
		dme_write(sc, DM9000_RCR, DM9000_RCR_RXEN |
		    DM9000_RCR_WTDIS | DM9000_RCR_PRMSC);
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	af[0] = af[1] = af[2] = af[3] = 0x0000;
	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		uint16_t hash;

		/* Compare (not copy) the range bounds of this entry. */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    sizeof(enm->enm_addrlo)) != 0) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			af[0] = af[1] = af[2] = af[3] = 0xffff;
			break;
		} else {
			hash = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x3F;
			af[(uint16_t)(hash >> 4)] |=
			    (uint16_t)(1 << (hash % 16));
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Write the multicast address filter */
	for (i = 0; i < 4; i++) {
		dme_write(sc, DM9000_MAB0 + i * 2, af[i] & 0xFF);
		dme_write(sc, DM9000_MAB0 + i * 2 + 1, (af[i] >> 8) & 0xFF);
	}

	/* Setup RX controls */
	dme_write(sc, DM9000_RCR, DM9000_RCR_RXEN | DM9000_RCR_WTDIS);
}
/*
 * Set the EPIC multicast hash table.
 *
 * NOTE: We rely on a recently-updated mii_media_active here!
 */
void
epic_set_mchash(struct epic_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hash, mchash[4];

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bits select the register, while the
	 * rest of the bits select the bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
		/* XXX hardware bug in 10Mbps mode. */
		goto allmulti;
	}

	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		hash >>= 26;

		/* Set the corresponding bit in the hash table. */
		mchash[hash >> 4] |= 1 << (hash & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}
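/*
 * Illustrative sketch (not part of any driver in this collection): the
 * MSB-first CRC-32 that ether_crc32_be() is generally understood to
 * compute (polynomial 0x04c11db7, all-ones seed, data bits fed LSB first,
 * no final inversion).  epic_set_mchash() above keeps the high-order 6
 * bits of this value (crc >> 26), as does mec_setfilter() further below;
 * tsec_iff() keeps the high-order 8 bits (crc >> 24); ste_iff() masks off
 * the low-order 6 bits (crc & 0x3f).  The name sketch_crc32_be() is
 * hypothetical and exists only for this example.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t
sketch_crc32_be(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffff;	/* all-ones initial value */
	size_t i;
	int bit;
	uint8_t data;

	for (i = 0; i < len; i++) {
		/* Each data byte is consumed least-significant bit first. */
		for (data = buf[i], bit = 0; bit < 8; bit++, data >>= 1) {
			uint32_t carry = (crc >> 31) ^ (data & 1);

			crc <<= 1;
			if (carry)
				crc ^= 0x04c11db7;
		}
	}
	return crc;	/* no final XOR, matching how the drivers consume it */
}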
void
ste_iff(struct ste_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t rxmode, hashes[2];
	int h = 0;

	rxmode = CSR_READ_1(sc, STE_RX_MODE);
	rxmode &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_BROADCAST |
	    STE_RXMODE_MULTIHASH | STE_RXMODE_PROMISC |
	    STE_RXMODE_UNICAST);
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxmode |= STE_RXMODE_BROADCAST | STE_RXMODE_UNICAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxmode |= STE_RXMODE_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxmode |= STE_RXMODE_PROMISC;
	} else {
		rxmode |= STE_RXMODE_MULTIHASH;

		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x3F;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
	CSR_WRITE_1(sc, STE_RX_MODE, rxmode);
}
void
mec_setfilter(struct mec_softc *sc)
{
	struct arpcom *ec = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t mchash;
	uint32_t control, hash;
	int mcnt;

	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
	control &= ~MEC_MAC_FILTER_MASK;

	if (ifp->if_flags & IFF_PROMISC) {
		control |= MEC_MAC_FILTER_PROMISC;
		bus_space_write_8(st, sh, MEC_MULTICAST,
		    0xffffffffffffffffULL);
		bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
		return;
	}

	mcnt = 0;
	mchash = 0;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN)) {
			/* Set allmulti for a range of multicast addresses. */
			control |= MEC_MAC_FILTER_ALLMULTI;
			bus_space_write_8(st, sh, MEC_MULTICAST,
			    0xffffffffffffffffULL);
			bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
			return;
		}

#define mec_calchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)

		hash = mec_calchash(enm->enm_addrlo);
		/* Use a 64-bit constant; hash can be as large as 63. */
		mchash |= 1ULL << hash;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt > 0)
		control |= MEC_MAC_FILTER_MATCHMULTI;

	bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
}
void
smsc_setmulti(struct smsc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashtbl[2] = { 0, 0 };
	uint32_t hash;

	if (sc->sc_dying)
		return;

	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
allmulti:
		smsc_dbg_printf(sc, "receive all multicast enabled\n");
		sc->sc_mac_csr |= SMSC_MAC_CSR_MCPAS;
		sc->sc_mac_csr &= ~SMSC_MAC_CSR_HPFILT;
		smsc_write_reg(sc, SMSC_MAC_CSR, sc->sc_mac_csr);
		return;
	} else {
		sc->sc_mac_csr |= SMSC_MAC_CSR_HPFILT;
		sc->sc_mac_csr &= ~(SMSC_MAC_CSR_PRMS | SMSC_MAC_CSR_MCPAS);
	}

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		hash = smsc_hash(enm->enm_addrlo);
		hashtbl[hash >> 5] |= 1 << (hash & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Debug */
	if (sc->sc_mac_csr & SMSC_MAC_CSR_HPFILT) {
		smsc_dbg_printf(sc, "receive select group of macs\n");
	} else {
		smsc_dbg_printf(sc, "receive own packets only\n");
	}

	/* Write the hash table and mac control registers */
	ifp->if_flags &= ~IFF_ALLMULTI;
	smsc_write_reg(sc, SMSC_HASHH, hashtbl[1]);
	smsc_write_reg(sc, SMSC_HASHL, hashtbl[0]);
	smsc_write_reg(sc, SMSC_MAC_CSR, sc->sc_mac_csr);
}
void
ste_setmulti(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->arpcom.ac_if;

allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_2(sc, STE_MAR0, 0);
	CSR_WRITE_2(sc, STE_MAR1, 0);
	CSR_WRITE_2(sc, STE_MAR2, 0);
	CSR_WRITE_2(sc, STE_MAR3, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);

	STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);

	return;
}
void
smsc_iff(struct smsc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct arpcom *ac = &sc->sc_ac;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashtbl[2] = { 0, 0 };
	uint32_t hash;

	if (usbd_is_dying(sc->sc_udev))
		return;

	sc->sc_mac_csr &= ~(SMSC_MAC_CSR_HPFILT | SMSC_MAC_CSR_MCPAS |
	    SMSC_MAC_CSR_PRMS);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		sc->sc_mac_csr |= SMSC_MAC_CSR_MCPAS;
		if (ifp->if_flags & IFF_PROMISC)
			sc->sc_mac_csr |= SMSC_MAC_CSR_PRMS;
	} else {
		sc->sc_mac_csr |= SMSC_MAC_CSR_HPFILT;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			hash = smsc_hash(enm->enm_addrlo);

			hashtbl[hash >> 5] |= 1 << (hash & 0x1F);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Debug */
	if (sc->sc_mac_csr & SMSC_MAC_CSR_MCPAS)
		smsc_dbg_printf(sc, "receive all multicast enabled\n");
	else if (sc->sc_mac_csr & SMSC_MAC_CSR_HPFILT)
		smsc_dbg_printf(sc, "receive select group of macs\n");

	/* Write the hash table and mac control registers */
	smsc_write_reg(sc, SMSC_HASHH, hashtbl[1]);
	smsc_write_reg(sc, SMSC_HASHL, hashtbl[0]);
	smsc_write_reg(sc, SMSC_MAC_CSR, sc->sc_mac_csr);
}
void
pdq_os_addr_fill(
    pdq_t *pdq,
    pdq_lanaddr_t *addr,
    size_t num_addrs)
{
	pdq_softc_t *sc = (pdq_softc_t *) pdq->pdq_os_ctx;
	struct ether_multistep step;
	struct ether_multi *enm;

	ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
	while (enm != NULL && num_addrs > 0) {
		((u_short *) addr->lanaddr_bytes)[0] =
		    ((u_short *) enm->enm_addrlo)[0];
		((u_short *) addr->lanaddr_bytes)[1] =
		    ((u_short *) enm->enm_addrlo)[1];
		((u_short *) addr->lanaddr_bytes)[2] =
		    ((u_short *) enm->enm_addrlo)[2];
		ETHER_NEXT_MULTI(step, enm);
		addr++;
		num_addrs--;
	}
}
void
kue_setmulti(struct kue_softc *sc)
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = GET_IFP(sc);
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	DPRINTFN(5,("%s: %s: enter\n", sc->kue_dev.dv_xname, __func__));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		sc->kue_rxfilt |= KUE_RXFILT_ALLMULTI;
		sc->kue_rxfilt &= ~KUE_RXFILT_MULTICAST;
		kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt);
		return;
	}

	sc->kue_rxfilt &= ~KUE_RXFILT_ALLMULTI;

	i = 0;
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (i == KUE_MCFILTCNT(sc))
			goto allmulti;

		memcpy(KUE_MCFILT(sc, i), enm->enm_addrlo, ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	sc->kue_rxfilt |= KUE_RXFILT_MULTICAST;
	kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MCAST_FILTERS,
	    i, sc->kue_mcfilters, i * ETHER_ADDR_LEN);

	kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt);
}
void
vnet_setmulti(struct vnet_softc *sc, int set)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct vnet_mcast_info mi;
	int count = 0;

	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
		return;

	bzero(&mi, sizeof(mi));
	mi.tag.type = VIO_TYPE_CTRL;
	mi.tag.stype = VIO_SUBTYPE_INFO;
	mi.tag.stype_env = VNET_MCAST_INFO;
	mi.tag.sid = sc->sc_local_sid;
	mi.set = set ? 1 : 0;
	KERNEL_LOCK();
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* XXX What about multicast ranges? */
		bcopy(enm->enm_addrlo, mi.mcast_addr[count], ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);

		count++;
		if (count < VNET_NUM_MCAST)
			continue;

		mi.count = VNET_NUM_MCAST;
		vnet_sendmsg(sc, &mi, sizeof(mi));
		count = 0;
	}

	if (count > 0) {
		mi.count = count;
		vnet_sendmsg(sc, &mi, sizeof(mi));
	}
	KERNEL_UNLOCK();
}
static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;
		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}
/*
 * Go through the list of multicast addresses and calculate the logical
 * address filter.
 */
void
mace_calcladrf(struct mc_softc *sc, u_int8_t *af)
{
	struct ether_multi *enm;
	u_int32_t crc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multistep step;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter.  The high order bits
	 * select the byte, while the rest of the bits select the bit within
	 * the byte.
	 */
	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	*((u_int32_t *)af) = *((u_int32_t *)af + 1) = 0;
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo));

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		af[crc >> 3] |= 1 << (crc & 7);

		ETHER_NEXT_MULTI(step, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	*((u_int32_t *)af) = *((u_int32_t *)af + 1) = 0xffffffff;
}
void
tsec_iff(struct tsec_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[8];
	uint32_t rctrl;
	int i;

	rctrl = tsec_read(sc, TSEC_RCTRL);
	rctrl &= ~TSEC_RCTRL_PROM;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Clear the group hash before building it up (or leaving it empty). */
	bzero(hash, sizeof(hash));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rctrl |= TSEC_RCTRL_PROM;
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			crc >>= 24;
			hash[crc / 32] |= 1 << (31 - (crc % 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	for (i = 0; i < nitems(hash); i++)
		tsec_write(sc, TSEC_GADDR0 + i * 4, hash[i]);

	tsec_write(sc, TSEC_RCTRL, rctrl);
}
/*
 * Set up the logical address filter.
 */
void
cas_setladrf(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct arpcom *ac = &sc->sc_arpcom;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t crc, hash[16], v;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(CAS_MAC_RX_PROMISCUOUS|CAS_MAC_RX_HASH_FILTER|
	    CAS_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= CAS_MAC_RX_PROMISCUOUS;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits select the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */

	/* Clear hash table */
	for (i = 0; i < 16; i++)
		hash[i] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 * XXX use the addr filter for this
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			v |= CAS_MAC_RX_PROMISC_GRP;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));

		ETHER_NEXT_MULTI(step, enm);
	}

	v |= CAS_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);
}
/*
 * Program the 64-bit multicast hash filter.
 */
void
rtk_setmulti(struct rtk_softc *sc)
{
	struct ifnet *ifp;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t rxfilt;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h, mcnt;

	ifp = &sc->ethercom.ec_if;

	rxfilt = CSR_READ_4(sc, RTK_RXCFG);

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RTK_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RTK_MAR0, 0);
	CSR_WRITE_4(sc, RTK_MAR4, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		h = rtk_calchash(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt)
		rxfilt |= RTK_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RTK_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);

	/*
	 * For some unfathomable reason, RealTek decided to reverse
	 * the order of the multicast hash registers in the PCI Express
	 * parts.  This means we have to write the hash pattern in reverse
	 * order for those devices.
	 */
	if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
		CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
		CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
	} else {
		CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
		CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
	}
}
/*
 * Reset multicast filter.
 */
void
qe_mcreset(struct qe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint16_t hash[4];
	uint8_t octet, maccc, *ladrp = (uint8_t *)&hash[0];
	int i;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif

	/* We also enable transmitter & receiver here */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	if (ifp->if_flags & IFF_PROMISC) {
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		bus_space_write_1(t, mr, QE_MRI_IAC,
		    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast
			 * addresses.  For now, just accept all
			 * multicasts, rather than trying to set only
			 * those filter bits needed to match the range.
			 * (At this time, the only use of address
			 * ranges is for IP multicast routing, for
			 * which the range is big enough to require
			 * all bits set.)
			 */
			bus_space_write_1(t, mr, QE_MRI_IAC,
			    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
			bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
			bus_space_write_1(t, mr, QE_MRI_IAC, 0);
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	/* We need to byte-swap the hash before writing to the chip. */
	for (i = 0; i < 7; i += 2) {
		octet = ladrp[i];
		ladrp[i] = ladrp[i + 1];
		ladrp[i + 1] = octet;
	}
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}
/*
 * How Command Block List Processing is done.
 *
 * A running CBL is never manipulated.  If there is a CBL already running,
 * further CMDs are deferred until the current list is done.  A new list is
 * set up when the old one has finished.
 * This eases programming.  To manipulate a running CBL it is necessary to
 * suspend the Command Unit to avoid race conditions.  After a suspend
 * is sent we have to wait for an interrupt that ACKs the suspend.  Then
 * we can manipulate the CBL and resume operation.  I am not sure that this
 * is more effective than the current, much simpler approach. => KISS
 * See i82596CA data sheet page 26.
 *
 * A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).
 *
 * A CBL may consist of TX CMDs, and _only_ TX CMDs.
 * A TX CBL is running or on the way to be set up when
 * ((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).
 *
 * A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
 * non-TX CMDs.
 *
 * This follows mostly from the way an Ethernet driver works and because
 * running CBLs are not manipulated while they are in progress.  If
 * if_start() is called there will be TX CMDs enqueued so we have a running
 * CBL and other CMDs from e.g. if_ioctl() will be deferred and vice versa.
 *
 * The Multicast Setup Command is special.  A MCS needs more space than
 * a single CB has.  Actual space requirement depends on the length of the
 * multicast list.  So we always defer MCS until other CBLs are finished,
 * then we set up a CONF CMD in the first CB.  The CONF CMD is needed to
 * turn ALLMULTI on or off in the hardware.  The MCS is the 2nd CB and may
 * use all the remaining space in the CBL and the Transmit Buffer Descriptor
 * List.  (Therefore CBL and TBDL must be continuous in physical and virtual
 * memory.  This is guaranteed through the definitions of the list offsets
 * in i82596reg.h and because it is only a single DMA segment used for all
 * lists.)  When ALLMULTI is enabled via the CONF CMD, the MCS is run with
 * a multicast list length of 0, thus disabling the multicast filter.
 * A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0)
 */
void
iee_cb_setup(struct iee_softc *sc, uint32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc, sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, sc->sc_cb_sz);
	cb->cb_cmd = cmd;
	switch (cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
		break;
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy(__UNVOLATILE(cb->cb_ind_addr), CLLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
		break;
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy(__UNVOLATILE(cb->cb_cf), sc->sc_cf,
		    sc->sc_cf[0] & IEE_CF_0_CNT_M);
		break;
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
			return;
		}
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promisc mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S |
			    IEE_CB_EL | IEE_CB_I);
			return;
		}
		/* Leave room for a CONF CMD to en/dis-able ALLMULTI mode */
		cb = SC_CB(sc, sc->sc_next_cb + 1);
		cb->cb_cmd = cmd;
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 ||
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN +
			    2 * sc->sc_cb_sz >
			    sc->sc_cb_sz * IEE_NCB +
			    sc->sc_tbd_sz * IEE_NTBD * IEE_NCB) {
				cb->cb_mcast.mc_size = 0;
				break;
			}
			memcpy(__UNVOLATILE(&cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN]),
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		}
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
		} else {
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than sc->sc_cb_sz bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    sc->sc_cb_off,
			    sc->sc_cb_sz * IEE_NCB +
			    sc->sc_tbd_sz * IEE_NTBD * IEE_NCB,
			    BUS_DMASYNC_PREWRITE);
		}
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
		break;
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_tbd_off +
		    sc->sc_tbd_sz * sc->sc_next_tbd));
		cb->cb_cmd |= IEE_CB_SF;	/* Always use Flexible Mode. */
		break;
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
		break;
	case IEE_CB_CMD_DUMP:	/* Dump */
		break;
	case IEE_CB_CMD_DIAG:	/* Diagnose */
		break;
	default:
		/* can't happen */
		break;
	}
	cb->cb_link_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_cb_off +
	    sc->sc_cb_sz * (sc->sc_next_cb + 1)));
	IEE_CBSYNC(sc, sc->sc_next_cb,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_next_cb++;
	ifp->if_timer = 5;
}
void
be_mcreset(struct be_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t br = sc->sc_br;
	u_int32_t crc;
	u_int16_t hash[4];
	u_int8_t octet;
	u_int32_t v;
	int i, j;
	struct ether_multi *enm;
	struct ether_multistep step;

	if (ifp->if_flags & IFF_PROMISC) {
		v = bus_space_read_4(t, br, BE_BRI_RXCFG);
		v |= BE_BR_RXCFG_PMISC;
		bus_space_write_4(t, br, BE_BRI_RXCFG, v);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		goto chipit;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast
			 * addresses.  For now, just accept all
			 * multicasts, rather than trying to set only
			 * those filter bits needed to match the range.
			 * (At this time, the only use of address
			 * ranges is for IP multicast routing, for
			 * which the range is big enough to require
			 * all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		/* Compute the little-endian CRC-32 of the address by hand. */
		crc = 0xffffffff;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else
					crc >>= 1;
				octet >>= 1;
			}
		}

		/* Just want the 6 most significant bits. */
		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Enable the hash filter */
	bus_space_write_4(t, br, BE_BRI_HASHTAB0, hash[0]);
	bus_space_write_4(t, br, BE_BRI_HASHTAB1, hash[1]);
	bus_space_write_4(t, br, BE_BRI_HASHTAB2, hash[2]);
	bus_space_write_4(t, br, BE_BRI_HASHTAB3, hash[3]);

	v = bus_space_read_4(t, br, BE_BRI_RXCFG);
	v &= ~BE_BR_RXCFG_PMISC;
	v |= BE_BR_RXCFG_HENABLE;
	bus_space_write_4(t, br, BE_BRI_RXCFG, v);
}
/*
 * Set up the logical address filter.
 */
void
bmac_setladrf(struct bmac_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];
	int x;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter.  The high order bits
	 * select the word, while the rest of the bits select the bit within
	 * the word.
	 */

	if (ifp->if_flags & IFF_PROMISC) {
		bmac_set_bits(sc, RXCFG, RxPromiscEnable);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		goto chipit;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	bmac_write_reg(sc, HASH0, hash[0]);
	bmac_write_reg(sc, HASH1, hash[1]);
	bmac_write_reg(sc, HASH2, hash[2]);
	bmac_write_reg(sc, HASH3, hash[3]);
	x = bmac_read_reg(sc, RXCFG);
	x &= ~RxPromiscEnable;
	x |= RxHashFilterEnable;
	bmac_write_reg(sc, RXCFG, x);
}
/*
 * Create a setup packet and put in queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = sc->sc_ac.ac_enaddr;
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling.  The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3;
	k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1;
			k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode?
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}
static void
camprogram(struct sn_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp;
	int timeout;
	int mcount = 0;

	caminitialise(sc);

	ifp = &sc->sc_if;

	/* Always load our own address first. */
	camentry(sc, mcount, CLLADDR(ifp->if_sadl));
	mcount++;

	/* Assume we won't need allmulti bit. */
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Loop through multicast addresses */
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		if (mcount == MAXCAM) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    sizeof(enm->enm_addrlo)) != 0) {
			/*
			 * SONIC's CAM is programmed with specific
			 * addresses.  It has no way to specify a range.
			 * (Well, that's not exactly true.  If the
			 * range is small one could program each addr
			 * within the range as a separate CAM entry.)
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		/* program the CAM with the specified entry */
		camentry(sc, mcount, enm->enm_addrlo);
		mcount++;

		ETHER_NEXT_MULTI(step, enm);
	}

	NIC_PUT(sc, SNR_CDP, LOWER(sc->v_cda));
	NIC_PUT(sc, SNR_CDC, MAXCAM);
	NIC_PUT(sc, SNR_CR, CR_LCAM);
	wbflush();

	timeout = 10000;
	while ((NIC_GET(sc, SNR_CR) & CR_LCAM) && timeout--)
		delay(10);
	if (timeout == 0) {
		/* XXX */
		panic("%s: CAM initialisation failed",
		    device_xname(sc->sc_dev));
	}

	timeout = 10000;
	while (((NIC_GET(sc, SNR_ISR) & ISR_LCD) == 0) && timeout--)
		delay(10);

	if (NIC_GET(sc, SNR_ISR) & ISR_LCD)
		NIC_PUT(sc, SNR_ISR, ISR_LCD);
	else
		printf("%s: CAM initialisation without interrupt\n",
		    device_xname(sc->sc_dev));
}
/*
 * Create a setup packet and put in queue for sending.
 */
void
ze_setup(struct ze_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	const u_int8_t *enaddr = CLLADDR(ifp->if_sadl);
	int j, idx, reg;

	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);

	/*
	 * Multicast handling.  The SGEC can handle up to 16 direct
	 * ethernet addresses.
	 */
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
		j += 8;
		ETHER_NEXT_MULTI(step, enm);
		if ((enm != NULL) && (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}

	/*
	 * ALLMULTI implies PROMISC in this driver.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;

	/*
	 * Fiddle with the receive logic.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR);	/* Stop rx */
	reg &= ~ZE_NICSR6_AF;
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);

	/*
	 * Only send a setup packet if needed.
	 */
	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
}
void
be_mcreset(struct be_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t br = sc->sc_br;
	uint32_t v;
	uint32_t crc;
	uint16_t hash[4];
	struct ether_multi *enm;
	struct ether_multistep step;

	if (ifp->if_flags & IFF_PROMISC) {
		v = bus_space_read_4(t, br, BE_BRI_RXCFG);
		v |= BE_BR_RXCFG_PMISC;
		bus_space_write_4(t, br, BE_BRI_RXCFG, v);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		goto chipit;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast
			 * addresses.  For now, just accept all
			 * multicasts, rather than trying to set only
			 * those filter bits needed to match the range.
			 * (At this time, the only use of address
			 * ranges is for IP multicast routing, for
			 * which the range is big enough to require
			 * all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Enable the hash filter */
	bus_space_write_4(t, br, BE_BRI_HASHTAB0, hash[0]);
	bus_space_write_4(t, br, BE_BRI_HASHTAB1, hash[1]);
	bus_space_write_4(t, br, BE_BRI_HASHTAB2, hash[2]);
	bus_space_write_4(t, br, BE_BRI_HASHTAB3, hash[3]);

	v = bus_space_read_4(t, br, BE_BRI_RXCFG);
	v &= ~BE_BR_RXCFG_PMISC;
	v |= BE_BR_RXCFG_HENABLE;
	bus_space_write_4(t, br, BE_BRI_RXCFG, v);
}