int
sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return error;
}
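/*
 * SQ_TRACE() is a driver-local debugging aid used throughout this file.
 * A hedged sketch of the idea, assuming a small ring of trace records in
 * the softc (the record and field names here are hypothetical; the real
 * macro lives in the driver's header):
 */
#if 0	/* illustrative sketch only */
#define	SQ_NTRACE	32
#define	SQ_TRACE(act, a, b, c) do {					\
	sc->sq_trace[sc->sq_trace_idx].trace_type = (act);		\
	sc->sq_trace[sc->sq_trace_idx].trace_arg1 = (u_long)(a);	\
	sc->sq_trace[sc->sq_trace_idx].trace_arg2 = (u_long)(b);	\
	sc->sq_trace[sc->sq_trace_idx].trace_arg3 = (u_long)(c);	\
	sc->sq_trace_idx = (sc->sq_trace_idx + 1) % SQ_NTRACE;		\
} while (0)
#endif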
static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try and start DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
			if ((status & ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, i, status,
				    sc->sc_nfreetx);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else {
				SQ_TRACE(SQ_TXINTR_BUSY, i, status,
				    sc->sc_nfreetx);
			}
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}
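/*
 * The ring traversal above relies on SQ_NEXTTX()/SQ_PREVTX(), which are
 * not shown in this section.  A minimal sketch of what those macros
 * plausibly look like, assuming a ring of SQ_NTXDESC descriptors with
 * simple modular wrap-around (the real definitions live in the driver's
 * header, e.g. if_sqvar.h):
 */
#if 0	/* illustrative sketch only */
#define	SQ_NEXTTX(x)	(((x) + 1) % SQ_NTXDESC)
#define	SQ_PREVTX(x)	(((x) + SQ_NTXDESC - 1) % SQ_NTXDESC)
#endif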
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		    seg < dmamap->dm_nsegs;
		    seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
			    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
			sc->sc_txdesc[lasttx].hdd_ctl +=
			    (ETHER_MIN_LEN - totlen);
			totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
		    sc->sc_nexttx, lasttx, totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:  0x%08x\n",
				    sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl:     0x%08x\n",
				    sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1, firsttx,
		    lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |=
		    (HDD_CTL_INTR | HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status,
			    sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
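/*
 * The hdd_bufptr/hdd_ctl/hdd_descptr fields filled in by sq_start()
 * describe one HPC DMA descriptor.  As a hedged sketch (field names are
 * taken from the code above; the exact struct, including any padding
 * words, is defined in the HPC headers), each descriptor is roughly:
 */
#if 0	/* illustrative sketch only */
struct hpc_dma_desc {
	u_int32_t hdd_bufptr;	/* physical address of the data segment */
	u_int32_t hdd_ctl;	/* segment length plus HDD_CTL_* flags */
	u_int32_t hdd_descptr;	/* physical address of the next descriptor */
};
#endif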
/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd =
	    RXCMD_IE_CRC |
	    RXCMD_IE_DRIB |
	    RXCMD_IE_SHORT |
	    RXCMD_IE_END |
	    RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD,
	    TXCMD_IE_UFLOW |
	    TXCMD_IE_COLL |
	    TXCMD_IE_16COLL |
	    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/*
	 * Set up HPC ethernet PIO and DMA configurations.
	 *
	 * The PROM appears to do most of this for the onboard HPC3, but
	 * not for the Challenge S's IOPLUS chip.  We copy how the onboard
	 * chip is configured and assume that it's correct for both.
	 */
	if (sc->hpc_regs->revision == 3) {
		uint32_t dmareg, pioreg;

		pioreg =
		    HPC3_ENETR_PIOCFG_P1(1) |
		    HPC3_ENETR_PIOCFG_P2(6) |
		    HPC3_ENETR_PIOCFG_P3(1);

		dmareg =
		    HPC3_ENETR_DMACFG_D1(6) |
		    HPC3_ENETR_DMACFG_D2(2) |
		    HPC3_ENETR_DMACFG_D3(0) |
		    HPC3_ENETR_DMACFG_FIX_RXDC |
		    HPC3_ENETR_DMACFG_FIX_INTR |
		    HPC3_ENETR_DMACFG_FIX_EOP |
		    HPC3_ENETR_DMACFG_TIMEOUT;

		sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
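/*
 * sq_seeq_write() and sq_hpc_write() are thin register-access helpers.
 * A minimal sketch of plausible implementations, assuming they simply
 * wrap the bus_space accessors over the Seeq (8-bit) and HPC (32-bit)
 * register windows (the real helpers may differ):
 */
#if 0	/* illustrative sketch only */
static inline void
sq_seeq_write(struct sq_softc *sc, bus_size_t off, uint8_t val)
{
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val);
}

static inline void
sq_hpc_write(struct sq_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val);
}
#endif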