/*
 * Set the hardware to the newly selected media.
 * Returns the mii_mediachg() status.
 */
int
bmac_mediachange(struct ifnet *ifp)
{
	struct bmac_softc *sc;

	sc = ifp->if_softc;
	return (mii_mediachg(&sc->sc_mii));
}
/*
 * octe_init: (re)initialize the interface.
 *
 * Sequence (order matters):
 *  - stop the interface if it is already RUNNING;
 *  - call the driver's open hook, if one is installed;
 *  - reprogram the multicast filter only when one of
 *    IFF_ALLMULTI/IFF_MULTICAST/IFF_PROMISC differs from the snapshot
 *    kept in priv->if_flags;
 *  - reload the station MAC address and poll the interface once;
 *  - renegotiate media via MII when a PHY bus is attached;
 *  - finally mark the interface RUNNING and clear OACTIVE.
 * Called with 'arg' pointing at the cvm_oct_private_t softc.
 */
static void octe_init(void *arg) { struct ifnet *ifp; cvm_oct_private_t *priv; priv = arg; ifp = priv->ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) octe_stop(priv); if (priv->open != NULL) priv->open(ifp); if (((ifp->if_flags ^ priv->if_flags) & (IFF_ALLMULTI | IFF_MULTICAST | IFF_PROMISC)) != 0) cvm_oct_common_set_multicast_list(ifp); cvm_oct_common_set_mac_address(ifp, IF_LLADDR(ifp)); cvm_oct_common_poll(ifp); if (priv->miibus != NULL) mii_mediachg(device_get_softc(priv->miibus)); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; }
/*
 * mec_init: initialize the MEC (SGI O2 MACE ethernet) interface.
 *
 * Cancels pending I/O, resets the chip, programs the RX filter, points
 * the TX ring base at descriptor 0 and resets the software TX indices,
 * then pushes every RX buffer address into the MCL RX FIFO (syncing the
 * descriptor status and buffer for DMA reads first).  DMA control is
 * programmed with TX interrupts deliberately left disabled — per the
 * in-line comment, enabling MEC_DMA_TX_INT_ENABLE here causes spurious
 * interrupts while the TX ring is empty; it is set later.  Finally the
 * one-second tick is armed, the interface is marked RUNNING/!OACTIVE,
 * transmission is kicked, and the media is (re)negotiated via MII.
 * Returns 0.
 */
int mec_init(struct ifnet *ifp) { struct mec_softc *sc = ifp->if_softc; bus_space_tag_t st = sc->sc_st; bus_space_handle_t sh = sc->sc_sh; struct mec_rxdesc *rxd; int i; /* Cancel any pending I/O. */ mec_stop(ifp); /* Reset device. */ mec_reset(sc); /* Setup filter for multicast or promisc mode. */ mec_setfilter(sc); /* Set the TX ring pointer to the base address. */ bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0)); sc->sc_txpending = 0; sc->sc_txdirty = 0; sc->sc_txlast = MEC_NTXDESC - 1; /* Put RX buffers into FIFO. */ for (i = 0; i < MEC_NRXDESC; i++) { rxd = &sc->sc_rxdesc[i]; rxd->rxd_stat = 0; MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD); MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD); bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i)); } sc->sc_rxptr = 0; #if 0 /* XXX no info */ bus_space_write_8(st, sh, MEC_TIMER, 0); #endif /* * MEC_DMA_TX_INT_ENABLE will be set later otherwise it causes * spurious interrupts when TX buffers are empty. */ bus_space_write_8(st, sh, MEC_DMA_CONTROL, (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) | (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) | MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */ MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE); timeout_add(&sc->sc_tick_ch, hz); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; mec_start(ifp); mii_mediachg(&sc->sc_mii); return 0; }
/*
 * Set the hardware to the newly selected media.  Always returns 0.
 */
int
che_ifmedia_upd(struct ifnet *ifp)
{
	struct che_softc *sc;

	sc = ifp->if_softc;
	mii_mediachg(&sc->sc_mii);
	return (0);
}
/*
 * Set hardware to newly-selected media.  A no-op unless the
 * interface is administratively up.  Always returns 0.
 */
int
bce_mediachange(struct ifnet *ifp)
{
	struct bce_softc *sc;

	sc = ifp->if_softc;
	if ((ifp->if_flags & IFF_UP) != 0)
		mii_mediachg(&sc->bce_mii);
	return (0);
}
/*
 * Media-change callback.  ENXIO from mii_mediachg() (no PHY for the
 * selected instance) is treated as success; other errors propagate.
 */
int
ax88190_mediachange(struct dp8390_softc *sc)
{
	int rc;

	rc = mii_mediachg(&sc->sc_mii);
	if (rc == ENXIO)
		rc = 0;
	return rc;
}
/*
 * Media-change callback: renegotiate via MII, but only when the
 * interface is administratively up.
 */
int
octeon_eth_mediachange(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	if (!(ifp->if_flags & IFF_UP))
		return 0;
	return mii_mediachg(&sc->sc_mii);
}
int cpsw_mediachange(struct ifnet *ifp) { struct cpsw_softc *sc = ifp->if_softc; if (LIST_FIRST(&sc->sc_mii.mii_phys)) mii_mediachg(&sc->sc_mii); return (0); }
/*
 * Renegotiate media via MII.  A no-op when no MII bus is attached.
 */
static void
ed_pccard_mediachg(struct ed_softc *sc)
{

	if (sc->miibus != NULL)
		mii_mediachg(device_get_softc(sc->miibus));
}
/*
 * Reset every PHY on the MII bus and renegotiate the media.
 * Returns the mii_mediachg() status.
 * NOTE(review): unlike ed_pccard_mediachg(), sc->miibus is assumed
 * non-NULL here — presumably guaranteed by the callers; verify.
 */
static int
ed_pccard_kick_phy(struct ed_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->miibus);
	struct mii_softc *phy;

	LIST_FOREACH(phy, &mii->mii_phys, mii_list)
		PHY_RESET(phy);
	return (mii_mediachg(mii));
}
/**
 * @group IFmedia routines.
 * @{
 */
/*
 * ifmedia "set media" callback: renegotiate via MII with the
 * softc lock held.  Always returns 0.
 */
static int
dtsec_ifmedia_upd(struct ifnet *ifp)
{
	struct dtsec_softc *sc;

	sc = ifp->if_softc;
	DTSEC_LOCK(sc);
	mii_mediachg(sc->sc_mii);
	DTSEC_UNLOCK(sc);
	return (0);
}
/*
 * Media-change callback.  When more than one PHY instance exists,
 * reset each attached PHY before switching; then renegotiate.
 */
int
cas_mediachange(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *phy;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(phy, &mii->mii_phys, mii_list)
			mii_phy_reset(phy);
	}
	return (mii_mediachg(mii));
}
/*
 * Media-change callback: reset every PHY on the MII bus, then
 * renegotiate.  Always returns 0.
 */
static int
octe_mii_medchange(struct ifnet *ifp)
{
	cvm_oct_private_t *priv = ifp->if_softc;
	struct mii_data *mii = device_get_softc(priv->miibus);
	struct mii_softc *phy;

	LIST_FOREACH(phy, &mii->mii_phys, mii_list)
		PHY_RESET(phy);
	mii_mediachg(mii);
	return (0);
}
/*
 * ifmedia "set media" callback.  If multiple PHY instances exist,
 * reset each attached PHY first; then renegotiate and return the
 * mii_mediachg() status.
 */
int
imxenet_ifmedia_upd(struct ifnet *ifp)
{
	struct imxenet_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *phy;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(phy, &mii->mii_phys, mii_list)
			mii_phy_reset(phy);
	}
	return (mii_mediachg(mii));
}
/*
 * ifmedia "set media" callback: renegotiate via MII with the
 * softc lock held; return the mii_mediachg() status.
 */
static int
awg_media_change(if_t ifp)
{
	struct awg_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->miibus);
	int error;

	AWG_LOCK(sc);
	error = mii_mediachg(mii);
	AWG_UNLOCK(sc);

	return (error);
}
/*
 * Media-change callback.  Reset each attached PHY when more than one
 * instance exists, then renegotiate; returns the mii_mediachg() status.
 */
int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct mii_softc *phy;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(phy, &mii->mii_phys, mii_list)
			mii_phy_reset(phy);
	}
	return (mii_mediachg(mii));
}
/*
 * Set media options.  Bails out early (returning 0) when the device is
 * detaching; otherwise clears the cached link state and renegotiates.
 * ENXIO from mii_mediachg() is treated as success.
 */
Static int
url_ifmedia_change(struct ifnet *ifp)
{
	struct url_softc *sc = ifp->if_softc;
	struct mii_data *mii = GET_MII(sc);
	int rc;

	DPRINTF(("%s: %s: enter\n", device_xname(sc->sc_dev), __func__));

	if (sc->sc_dying)
		return (0);

	sc->sc_link = 0;
	rc = mii_mediachg(mii);
	if (rc == ENXIO)
		rc = 0;
	return rc;
}
/* * Set media options. */ static int kr_ifmedia_upd(struct ifnet *ifp) { struct kr_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = ifp->if_softc; KR_LOCK(sc); mii = device_get_softc(sc->kr_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); KR_UNLOCK(sc); return (error); }
/*
 * ifmedia "set media" callback.  Clears the cached link state, resets
 * each attached PHY when multiple instances exist, then renegotiates.
 * Always returns 0.
 */
int
ste_ifmedia_upd(struct ifnet *ifp)
{
	struct ste_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *phy;

	sc->ste_link = 0;
	if (mii->mii_instance != 0) {
		LIST_FOREACH(phy, &mii->mii_phys, mii_list)
			mii_phy_reset(phy);
	}
	mii_mediachg(mii);

	return (0);
}
/* Set media options */ Static int url_ifmedia_change(struct ifnet *ifp) { struct url_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); sc->sc_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } return (mii_mediachg(mii)); }
/*
 * smap_init: initialize the SMAP (PlayStation 2 SPEED) interface.
 *
 * Initializes the FIFOs, resets the EMAC3 core and the descriptor
 * rings, then acknowledges and (re)enables the RXEND/TXEND/RXDNV and
 * EMAC3 interrupt sources before enabling the MAC.  The multicast
 * filter is programmed and the current media is set via MII — ENXIO
 * from mii_mediachg() (no matching PHY) is treated as success, any
 * other error aborts the init and is returned.  On success the
 * interface is marked RUNNING and 0 is returned.
 */
int smap_init(struct ifnet *ifp) { struct smap_softc *sc = ifp->if_softc; u_int16_t r16; int rc; smap_fifo_init(sc); emac3_reset(&sc->emac3); smap_desc_init(sc); _reg_write_2(SPD_INTR_CLEAR_REG16, SPD_INTR_RXEND | SPD_INTR_TXEND | SPD_INTR_RXDNV); emac3_intr_clear(); r16 = _reg_read_2(SPD_INTR_ENABLE_REG16); r16 |= SPD_INTR_EMAC3 | SPD_INTR_RXEND | SPD_INTR_TXEND | SPD_INTR_RXDNV; _reg_write_2(SPD_INTR_ENABLE_REG16, r16); emac3_intr_enable(); emac3_enable(); /* Program the multicast filter, if necessary. */ emac3_setmulti(&sc->emac3, &sc->ethercom); /* Set current media. */ if ((rc = mii_mediachg(&sc->emac3.mii)) == ENXIO) rc = 0; else if (rc != 0) return rc; ifp->if_flags |= IFF_RUNNING; return (0); }
/*
 * bce_init: initialize the BCM440x interface.
 *
 * Cancels pending I/O, then — via a temporary remap of the PCI register
 * window to the Sonics backplane registers (saved and restored around
 * the accesses) — enables the SB-to-PCI interrupt and prefetch/burst
 * for translation 2.  After resetting the chip it zeroes the TX ring
 * and indices, enables CRC32 generation and the LED modes, clears the
 * powerdown bit, programs the DMA interrupt lazy-count (1 << 24 is an
 * undocumented "MAGIC" value, per the in-line comment), installs the
 * packet filter, and sets the RX/TX maximum frame length including a
 * possible VLAN tag.  TX DMA is enabled and pointed at the ring (the
 * + 0x40000000 address offset is likewise marked MAGIC), the RX ring
 * is zeroed and handed to the chip, RX mbufs are (re)allocated with
 * bce_add_rxbuf() — on allocation failure the RX chain is drained and
 * the error returned — interrupts are unmasked, the RX DMA pointer is
 * kicked, the media is set via MII, the MAC is enabled, the one-second
 * timer started, and the interface marked RUNNING/!OACTIVE.
 * Returns 0 on success or an errno from RX buffer allocation.
 */
/* initialize the interface */ int bce_init(struct ifnet *ifp) { struct bce_softc *sc = ifp->if_softc; u_int32_t reg_win; int error; int i; /* Cancel any pending I/O. */ bce_stop(ifp, 0); /* enable pci inerrupts, bursts, and prefetch */ /* remap the pci registers to the Sonics config registers */ /* save the current map, so it can be restored */ reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN); /* set register window to Sonics registers */ pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN, BCE_SONICS_WIN); /* enable SB to PCI interrupt */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC, bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) | SBIV_ENET0); /* enable prefetch and bursts for sonics-to-pci translation 2 */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2, bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST); /* restore to ethernet register space */ pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN, reg_win); /* Reset the chip to a known state. 
*/ bce_reset(sc); /* Initialize transmit descriptors */ memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot)); sc->bce_txsnext = 0; sc->bce_txin = 0; /* enable crc32 generation and set proper LED modes */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL, bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) | BCE_EMC_CRC32_ENAB | BCE_EMC_LED); /* reset or clear powerdown control bit */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL, bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) & ~BCE_EMC_PDOWN); /* setup DMA interrupt control */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24); /* MAGIC */ /* setup packet filter */ bce_set_filter(ifp); /* set max frame length, account for possible VLAN tag */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); /* set tx watermark */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56); /* enable transmit */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE); bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR, sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000); /* MAGIC */ /* * Give the receive ring to the chip, and * start the receive DMA engine. 
*/ sc->bce_rxin = 0; /* clear the rx descriptor ring */ memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot)); /* enable receive */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, BCE_PREPKT_HEADER_SIZE << 1 | XC_XE); bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR, sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000); /* MAGIC */ /* Initialize receive descriptors */ for (i = 0; i < BCE_NRXDESC; i++) { if (sc->bce_cdata.bce_rx_chain[i] == NULL) { if ((error = bce_add_rxbuf(sc, i)) != 0) { printf("%s: unable to allocate or map rx(%d) " "mbuf, error = %d\n", sc->bce_dev.dv_xname, i, error); bce_rxdrain(sc); return (error); } } else BCE_INIT_RXDESC(sc, i); } /* Enable interrupts */ sc->bce_intmask = I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO; bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, sc->bce_intmask); /* start the receive dma */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR, BCE_NRXDESC * sizeof(struct bce_dma_slot)); /* set media */ mii_mediachg(&sc->bce_mii); /* turn on the ethernet mac */ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL) | EC_EE); /* start timer */ timeout_add(&sc->bce_timeout, hz); /* mark as running, and no outputs active */ ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; return 0; }
/*
 * Card-init hook: (re)negotiate the media via MII.
 */
void
ax88190_init_card(struct dp8390_softc *sc)
{

	mii_mediachg(&sc->sc_mii);
}
/*
 * Card-init hook: reset the MII bus, then renegotiate the media.
 */
void
dl10019_init_card(struct dp8390_softc *sc)
{

	dl10019_mii_reset(sc);
	mii_mediachg(&sc->sc_mii);
}
/*
 * Media-change callback: renegotiate via MII.  Always returns 0.
 */
int
dl10019_mediachange(struct dp8390_softc *sc)
{

	mii_mediachg(&sc->sc_mii);
	return (0);
}
static int ffec_media_change_locked(struct ffec_softc *sc) { return (mii_mediachg(sc->mii_softc)); }
/*
 * url_init: initialize the RTL8150 USB ethernet interface.
 *
 * Returns EIO when the device is detaching, when either descriptor
 * list fails to allocate, or when the USB pipes cannot be opened;
 * 0 on success.  Runs at splnet().  Sequence: stop pending I/O,
 * program the station address registers (source of the address differs
 * per OS: the arpcom on OpenBSD, the sockaddr_dl on NetBSD), set up
 * the transmit/receive control registers (broadcast and promiscuous
 * acceptance mirror the interface flags), initialize the TX and RX
 * rings, load the multicast filter, enable the transmitter and
 * receiver, renegotiate the media via MII, open the bulk pipes if not
 * already open, mark the interface RUNNING/!OACTIVE, and arm the
 * one-second statistics callout.
 */
Static int url_init(struct ifnet *ifp) { struct url_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); u_char *eaddr; int i, s; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (EIO); s = splnet(); /* Cancel pending I/O and free all TX/RX buffers */ url_stop(ifp, 1); #if defined(__OpenBSD__) eaddr = sc->sc_ac.ac_enaddr; #elif defined(__NetBSD__) eaddr = LLADDR(ifp->if_sadl); #endif for (i = 0; i < ETHER_ADDR_LEN; i++) url_csr_write_1(sc, URL_IDR0 + i, eaddr[i]); /* Init transmission control register */ URL_CLRBIT(sc, URL_TCR, URL_TCR_TXRR1 | URL_TCR_TXRR0 | URL_TCR_IFG1 | URL_TCR_IFG0 | URL_TCR_NOCRC); /* Init receive control register */ URL_SETBIT2(sc, URL_RCR, URL_RCR_TAIL | URL_RCR_AD); if (ifp->if_flags & IFF_BROADCAST) URL_SETBIT2(sc, URL_RCR, URL_RCR_AB); else URL_CLRBIT2(sc, URL_RCR, URL_RCR_AB); /* If we want promiscuous mode, accept all physical frames. */ if (ifp->if_flags & IFF_PROMISC) URL_SETBIT2(sc, URL_RCR, URL_RCR_AAM|URL_RCR_AAP); else URL_CLRBIT2(sc, URL_RCR, URL_RCR_AAM|URL_RCR_AAP); /* Initialize transmit ring */ if (url_tx_list_init(sc) == ENOBUFS) { printf("%s: tx list init failed\n", USBDEVNAME(sc->sc_dev)); splx(s); return (EIO); } /* Initialize receive ring */ if (url_rx_list_init(sc) == ENOBUFS) { printf("%s: rx list init failed\n", USBDEVNAME(sc->sc_dev)); splx(s); return (EIO); } /* Load the multicast filter */ url_setmulti(sc); /* Enable RX and TX */ URL_SETBIT(sc, URL_CR, URL_CR_TE | URL_CR_RE); mii_mediachg(mii); if (sc->sc_pipe_tx == NULL || sc->sc_pipe_rx == NULL) { if (url_openpipes(sc)) { splx(s); return (EIO); } } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; splx(s); usb_callout(sc->sc_stat_ch, hz, url_tick, sc); return (0); }
/*
 * Media-change callback: renegotiate via MII.  Always returns 0.
 */
int
ax88190_mediachange(struct dp8390_softc *sc)
{

	mii_mediachg(&sc->sc_mii);
	return (0);
}
/*
 * cas_init: bring up the Cassini interface.
 *
 * Follows the numbered initialization sequence from the Ethernet
 * Channel Engine manual (see the in-line step comments): reset the
 * channel, re-init the MIF, set up the host-memory data structures and
 * MAC registers, program the max frame size (ETHER_MAX_LEN plus a VLAN
 * tag) and the RX address filter, hand the chip the TX ring, the RX
 * descriptor ring, and the RX completion ring base addresses (each
 * asserted 8 KB aligned; Cassini+ parts get a second RX ring), unmask
 * the interrupt sources, enable TX and RX DMA with the documented
 * ring-size encodings, set the RX pause thresholds (~3/4 full OFF,
 * 1/4 full ON) and interrupt blanking, set the media via MII, enable
 * the RX MAC with CRC stripping, kick the receiver, start the
 * one-second tick, and mark the interface RUNNING/!OACTIVE.
 * Runs the whole sequence at splnet(); returns 0.
 */
/* * Initialization of interface; set up initialization block * and transmit/receive descriptor rings. */ int cas_init(struct ifnet *ifp) { struct cas_softc *sc = (struct cas_softc *)ifp->if_softc; bus_space_tag_t t = sc->sc_memt; bus_space_handle_t h = sc->sc_memh; int s; u_int max_frame_size; u_int32_t v; s = splnet(); DPRINTF(sc, ("%s: cas_init: calling stop\n", sc->sc_dev.dv_xname)); /* * Initialization sequence. The numbered steps below correspond * to the sequence outlined in section 6.3.5.1 in the Ethernet * Channel Engine manual (part of the PCIO manual). * See also the STP2002-STQ document from Sun Microsystems. */ /* step 1 & 2. Reset the Ethernet Channel */ cas_stop(ifp, 0); cas_reset(sc); DPRINTF(sc, ("%s: cas_init: restarting\n", sc->sc_dev.dv_xname)); /* Re-initialize the MIF */ cas_mifinit(sc); /* step 3. Setup data structures in host memory */ cas_meminit(sc); /* step 4. TX MAC registers & counters */ cas_init_regs(sc); max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN; v = (max_frame_size) | (0x2000 << 16) /* Burst size */; bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v); /* step 5. RX MAC registers & counters */ cas_setladrf(sc); /* step 6 & 7. 
Program Descriptor Ring Base Addresses */ KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0); bus_space_write_4(t, h, CAS_TX_RING_PTR_HI, (((uint64_t)CAS_CDTXADDR(sc,0)) >> 32)); bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0)); KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0); bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI, (((uint64_t)CAS_CDRXADDR(sc,0)) >> 32)); bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0)); KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0); bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI, (((uint64_t)CAS_CDRXCADDR(sc,0)) >> 32)); bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0)); if (CAS_PLUS(sc)) { KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0); bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2, (((uint64_t)CAS_CDRXADDR2(sc,0)) >> 32)); bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2, CAS_CDRXADDR2(sc, 0)); } /* step 8. Global Configuration & Interrupt Mask */ bus_space_write_4(t, h, CAS_INTMASK, ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY| CAS_INTR_TX_TAG_ERR| CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF| CAS_INTR_RX_TAG_ERR| CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS| CAS_INTR_MAC_CONTROL|CAS_INTR_MIF| CAS_INTR_BERR)); bus_space_write_4(t, h, CAS_MAC_RX_MASK, CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT); bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE); bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */ /* step 9. ETX Configuration: use mostly default values */ /* Enable DMA */ v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10; bus_space_write_4(t, h, CAS_TX_CONFIG, v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29)); bus_space_write_4(t, h, CAS_TX_KICK, 0); /* step 10. 
ERX Configuration */ /* Encode Receive Descriptor ring size */ v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT; if (CAS_PLUS(sc)) v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT; /* Encode Receive Completion ring size */ v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT; /* Enable DMA */ bus_space_write_4(t, h, CAS_RX_CONFIG, v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN); /* * The following value is for an OFF Threshold of about 3/4 full * and an ON Threshold of 1/4 full. */ bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH, (3 * sc->sc_rxfifosize / 256) | ( (sc->sc_rxfifosize / 256) << 12)); bus_space_write_4(t, h, CAS_RX_BLANKING, (6<<12)|6); /* step 11. Configure Media */ mii_mediachg(&sc->sc_mii); /* step 12. RX_MAC Configuration Register */ v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG); v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC; bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v); /* step 14. Issue Transmit Pending command */ /* step 15. Give the receiver a swift kick */ bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4); if (CAS_PLUS(sc)) bus_space_write_4(t, h, CAS_RX_KICK2, 4); /* Start the one second timer. */ timeout_add(&sc->sc_tick_ch, hz); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; ifp->if_timer = 0; splx(s); return (0); }
/*
 * kr_init_locked: bring up the Korina (IDT RC32434) interface.
 * Caller must hold the softc lock (asserted on entry).
 *
 * Stops and resets the chip, enables the ethernet interface, and
 * initializes the RX and TX DMA descriptor rings — on RX ring
 * allocation failure the device is stopped again and the function
 * returns without marking the interface running.  It then programs
 * unicast-only address recognition, loads all four station-address
 * register pairs with the same initial value, configures MAC2 for
 * full duplex with padding and CRC, sets the inter-packet gaps,
 * resets the MII management interface and its clock prescaler (the
 * in-line TODO notes the prescale is not yet computed properly),
 * sets the TX FIFO threshold, enables the receiver, clears the
 * cached link state, renegotiates media via MII, marks the interface
 * RUNNING/!OACTIVE, and arms the one-second statistics callout.
 */
static void kr_init_locked(struct kr_softc *sc) { struct ifnet *ifp = sc->kr_ifp; struct mii_data *mii; KR_LOCK_ASSERT(sc); mii = device_get_softc(sc->kr_miibus); kr_stop(sc); kr_reset(sc); CSR_WRITE_4(sc, KR_ETHINTFC, ETH_INTFC_EN); /* Init circular RX list. */ if (kr_rx_ring_init(sc) != 0) { device_printf(sc->kr_dev, "initialization failed: no memory for rx buffers\n"); kr_stop(sc); return; } /* Init tx descriptors. */ kr_tx_ring_init(sc); KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0); KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0); KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR, sc->kr_rdata.kr_rx_ring_paddr); KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM, DMA_SM_H | DMA_SM_E | DMA_SM_D) ; KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0); KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0); KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0); KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM, DMA_SM_F | DMA_SM_E); /* Accept only packets destined for THIS Ethernet device address */ CSR_WRITE_4(sc, KR_ETHARC, 1); /* * Set all Ethernet address registers to the same initial values * set all four addresses to 66-88-aa-cc-dd-ee */ CSR_WRITE_4(sc, KR_ETHSAL0, 0x42095E6B); CSR_WRITE_4(sc, KR_ETHSAH0, 0x0000000C); CSR_WRITE_4(sc, KR_ETHSAL1, 0x42095E6B); CSR_WRITE_4(sc, KR_ETHSAH1, 0x0000000C); CSR_WRITE_4(sc, KR_ETHSAL2, 0x42095E6B); CSR_WRITE_4(sc, KR_ETHSAH2, 0x0000000C); CSR_WRITE_4(sc, KR_ETHSAL3, 0x42095E6B); CSR_WRITE_4(sc, KR_ETHSAH3, 0x0000000C); CSR_WRITE_4(sc, KR_ETHMAC2, KR_ETH_MAC2_PEN | KR_ETH_MAC2_CEN | KR_ETH_MAC2_FD); CSR_WRITE_4(sc, KR_ETHIPGT, KR_ETHIPGT_FULL_DUPLEX); CSR_WRITE_4(sc, KR_ETHIPGR, 0x12); /* minimum value */ CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R); DELAY(1000); CSR_WRITE_4(sc, KR_MIIMCFG, 0); /* TODO: calculate prescale */ CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1); /* FIFO Tx threshold level */ CSR_WRITE_4(sc, KR_ETHFIFOTT, 0x30); CSR_WRITE_4(sc, KR_ETHMAC1, KR_ETH_MAC1_RE); sc->kr_link_status = 0; mii_mediachg(mii); ifp->if_drv_flags |= IFF_DRV_RUNNING; 
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc); }