/*
 * Deferred link-state change handler; runs from a taskqueue (detach
 * drains it through taskqueue_swi) and caches the MII link status.
 */
static void
kr_link_task(void *arg, int pending)
{
	struct kr_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	/* int			lfdx, mfdx; */

	sc = (struct kr_softc *)arg;

	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	ifp = sc->kr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		KR_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->kr_link_status = 1;
	} else
		sc->kr_link_status = 0;

	KR_UNLOCK(sc);
}
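/*
 * TX completion interrupt handler: mask the TX DMA interrupts, reap
 * finished descriptors via kr_tx(), ack the status bits and unmask.
 */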
static void
kr_tx_intr(void *arg)
{
	struct kr_softc		*sc = arg;
	uint32_t		status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S);
	if (status & (DMA_S_F | DMA_S_E)) {
		kr_tx(sc);
		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "DMA error\n");
	}

	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, ~status);

	/* Enable F, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	KR_UNLOCK(sc);
}
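/*
 * Locked wrapper around kr_init_locked().
 */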
static void
kr_init(void *xsc)
{
	struct kr_softc		*sc = xsc;

	KR_LOCK(sc);
	kr_init_locked(sc);
	KR_UNLOCK(sc);
}
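/*
 * Locked wrapper around kr_start_locked().
 */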
static void
kr_start(struct ifnet *ifp)
{
	struct kr_softc		*sc;

	sc = ifp->if_softc;
	KR_LOCK(sc);
	kr_start_locked(ifp);
	KR_UNLOCK(sc);
}
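/*
 * Tear down the interface and release interrupt handlers, bus
 * resources and DMA state in the reverse order of attach.
 */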
static int
kr_detach(device_t dev)
{
	struct kr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->kr_ifp;

	KASSERT(mtx_initialized(&sc->kr_mtx), ("kr mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		KR_LOCK(sc);
		sc->kr_detach = 1;
		kr_stop(sc);
		KR_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->kr_link_task);
		ether_ifdetach(ifp);
	}
	if (sc->kr_miibus)
		device_delete_child(dev, sc->kr_miibus);
	bus_generic_detach(dev);

	if (sc->kr_rx_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_irq, sc->kr_rx_intrhand);
	if (sc->kr_rx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_irq);
	if (sc->kr_tx_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_irq, sc->kr_tx_intrhand);
	if (sc->kr_tx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_irq);
	if (sc->kr_rx_und_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_und_irq,
		    sc->kr_rx_und_intrhand);
	if (sc->kr_rx_und_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_und_irq);
	if (sc->kr_tx_ovr_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_ovr_irq,
		    sc->kr_tx_ovr_intrhand);
	if (sc->kr_tx_ovr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_ovr_irq);

	if (sc->kr_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->kr_rid,
		    sc->kr_res);

	if (ifp)
		if_free(ifp);

	kr_dma_free(sc);
	mtx_destroy(&sc->kr_mtx);

	return (0);
}
/*
 * Report current media status.
 */
static void
kr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kr_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->kr_miibus);
	KR_LOCK(sc);
	mii_pollstat(mii);
	KR_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
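/*
 * Stop the adapter on system shutdown.
 */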
static int
kr_shutdown(device_t dev)
{
	struct kr_softc		*sc;

	sc = device_get_softc(dev);

	KR_LOCK(sc);
	kr_stop(sc);
	KR_UNLOCK(sc);

	return (0);
}
/*
 * Set media options.
 */
static int
kr_ifmedia_upd(struct ifnet *ifp)
{
	struct kr_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	KR_UNLOCK(sc);

	return (error);
}
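/*
 * RX interrupt handler: mask the RX DMA interrupts, process received
 * frames via kr_rx(), restart the DMA engine if it halted, ack the
 * status bits and unmask.
 */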
static void
kr_rx_intr(void *arg)
{
	struct kr_softc		*sc = arg;
	uint32_t		status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);
	if (status & (DMA_S_D | DMA_S_E | DMA_S_H)) {
		kr_rx(sc);
		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "RX DMA error\n");
	}

	/* Reread status */
	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);

	/* restart DMA RX if it has been halted */
	if (status & DMA_S_H) {
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
		    KR_RX_RING_ADDR(sc, sc->kr_cdata.kr_rx_cons));
	}

	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, ~status);

	/* Enable D, H, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	KR_UNLOCK(sc);
}
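/*
 * Handle interface ioctls.  Filter, multicast and capability handling
 * is currently stubbed out (#if 0); only media ioctls are passed to
 * the MII layer, and everything else goes to ether_ioctl().
 */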
static int
kr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct kr_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	struct mii_data		*mii;
	int			error;

	switch (command) {
	case SIOCSIFFLAGS:
#if 0
		KR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->kr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					kr_set_filter(sc);
			} else {
				if (sc->kr_detach == 0)
					kr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				kr_stop(sc);
		}
		sc->kr_if_flags = ifp->if_flags;
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#if 0
		KR_LOCK(sc);
		kr_set_filter(sc);
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->kr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		error = 0;
#if 0
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) &&
			    (IFCAP_HWCSUM & ifp->if_capabilities))
				ifp->if_hwassist = KR_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (IFCAP_VLAN_HWTAGGING & ifp->if_capenable &&
			    IFCAP_VLAN_HWTAGGING & ifp->if_capabilities &&
			    ifp->if_drv_flags & IFF_DRV_RUNNING) {
				KR_LOCK(sc);
				kr_vlan_setup(sc);
				KR_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
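/*
 * Walk the RX ring from the current consumer index, hand completed
 * frames to the network stack and recycle or replenish descriptors.
 */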
static void
kr_rx(struct kr_softc *sc)
{
	struct kr_rxdesc	*rxd;
	struct ifnet		*ifp = sc->kr_ifp;
	int			cons, prog, packet_len, count, error;
	struct kr_desc		*cur_rx;
	struct mbuf		*m;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_rx_cons;

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < KR_RX_RING_CNT; KR_INC(cons, KR_RX_RING_CNT)) {
		cur_rx = &sc->kr_rdata.kr_rx_ring[cons];
		rxd = &sc->kr_cdata.kr_rxdesc[cons];
		m = rxd->rx_m;

		if ((cur_rx->kr_ctl & KR_CTL_D) == 0)
			break;

		prog++;

		packet_len = KR_PKTSIZE(cur_rx->kr_devcs);
		count = m->m_len - KR_DMASIZE(cur_rx->kr_ctl);

		/* Assume the frame is bad until proven otherwise */
		error = 1;
		if (packet_len != count)
			ifp->if_ierrors++;
		else if (count < 64)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_LD) == 0)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_ROK) != 0) {
			error = 0;
			bus_dmamap_sync(sc->kr_cdata.kr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_PREREAD);
			m = rxd->rx_m;
			kr_fixup_rx(m);
			m->m_pkthdr.rcvif = ifp;
			/* Skip 4 bytes of CRC */
			m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
			ifp->if_ipackets++;

			KR_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			KR_LOCK(sc);
		}

		if (error) {
			/* Restore CONTROL and CA values, reset DEVCS */
			cur_rx->kr_ctl = rxd->saved_ctl;
			cur_rx->kr_ca = rxd->saved_ca;
			cur_rx->kr_devcs = 0;
		} else {
			/* Reinit descriptor */
			cur_rx->kr_ctl = KR_CTL_IOD;
			if (cons == KR_RX_RING_CNT - 1)
				cur_rx->kr_ctl |= KR_CTL_COD;
			cur_rx->kr_devcs = 0;
			cur_rx->kr_ca = 0;
			if (kr_newbuf(sc, cons) != 0) {
				device_printf(sc->kr_dev,
				    "Failed to allocate buffer\n");
				break;
			}
		}

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	if (prog > 0) {
		sc->kr_cdata.kr_rx_cons = cons;

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}