/*
 * tlp_smc9332dst_tmsw_init:
 *
 *	Media-select setup for the SMC 9332DST board (21140 chip with no
 *	MII transceiver).  Media is chosen via the chip's general-purpose
 *	pins: the function builds the ifmedia list by hand, then samples
 *	the GP link-status pins to pick a sensible default medium.
 */
static void
tlp_smc9332dst_tmsw_init(struct tulip_softc *sc)
{
	struct tulip_21x4x_media *tm;
	const char *sep = "";
	uint32_t reg;
	int i, cnt;

	/* Program GP pin directions and base operating mode. */
	sc->sc_gp_dir = GPP_SMC9332DST_PINS;
	sc->sc_opmode = OPMODE_MBO | OPMODE_PS;
	TULIP_WRITE(sc, CSR_OPMODE, sc->sc_opmode);

	ifmedia_init(&sc->sc_mii.mii_media, 0, tlp_mediachange,
	    tlp_mediastatus);

	/* Start the autoconf media line; PRINT() appends names below. */
	aprint_normal_dev(sc->sc_dev, "");

	/*
	 * ADD() allocates a media descriptor carrying the OPMODE bits and
	 * GP data for one medium and registers it with ifmedia.
	 * NOTE(review): neither macro is do { } while (0)-wrapped, so they
	 * are only safe in the plain statement context used here.
	 */
#define	ADD(m, c) \
	tm = malloc(sizeof(*tm), M_DEVBUF, M_WAITOK|M_ZERO);		\
	tm->tm_opmode = (c);						\
	tm->tm_gpdata = GPP_SMC9332DST_INIT;				\
	ifmedia_add(&sc->sc_mii.mii_media, (m), 0, tm)
#define	PRINT(str)	aprint_normal("%s%s", sep, str); sep = ", "

	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, 0), OPMODE_TTM);
	PRINT("10baseT");

	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, 0),
	    OPMODE_TTM | OPMODE_FD);
	PRINT("10baseT-FDX");

	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, 0),
	    OPMODE_PS | OPMODE_PCS | OPMODE_SCR);
	PRINT("100baseTX");

	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, 0),
	    OPMODE_PS | OPMODE_PCS | OPMODE_SCR | OPMODE_FD);
	PRINT("100baseTX-FDX");

#undef ADD
#undef PRINT

	aprint_normal("\n");

	/*
	 * Reset the chip, then drive the GP pins to their initial state.
	 * The delays between writes give the board time to settle; the
	 * long 200 ms delay lets the link-status pins become valid.
	 */
	tlp_reset(sc);
	TULIP_WRITE(sc, CSR_OPMODE, sc->sc_opmode | OPMODE_PCS | OPMODE_SCR);
	TULIP_WRITE(sc, CSR_GPP, GPP_GPC | sc->sc_gp_dir);
	delay(10);
	TULIP_WRITE(sc, CSR_GPP, GPP_SMC9332DST_INIT);
	delay(200000);

	/*
	 * Sense link for up to ~1 second (1000 x 1 ms polls).  The OK
	 * pins are active-low: when both OK10 and OK100 read as "no
	 * link", count consecutive samples; a long run (>100) is taken
	 * to mean 100baseTX.  A sample with OK10 asserted (bit clear)
	 * means 10baseT link and ends the scan immediately.
	 */
	cnt = 0;
	for (i = 1000; i > 0; i--) {
		reg = TULIP_READ(sc, CSR_GPP);
		if ((~reg & (GPP_SMC9332DST_OK10 |
		    GPP_SMC9332DST_OK100)) == 0) {
			if (cnt++ > 100) {
				break;
			}
		} else if ((reg & GPP_SMC9332DST_OK10) == 0) {
			break;
		} else {
			cnt = 0;
		}
		delay(1000);
	}

	/* Default to 100baseTX only if the no-10baseT run was long enough. */
	if (cnt > 100) {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_100_TX);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
	}
}
/*
 * octe_attach:
 *
 *	Attach one Octeon Ethernet port: probe its PHY (or add the named
 *	PHY child device), fall back to a fixed ifmedia if no MII bus
 *	appears, then initialize the ifnet, per-QoS transmit queues and
 *	checksum-offload capabilities and attach the interface.
 */
static int
octe_attach(device_t dev)
{
	struct ifnet *ifp;
	cvm_oct_private_t *priv;
	device_t child;
	unsigned qos;
	int error;

	priv = device_get_softc(dev);
	ifp = priv->ifp;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * phy_id == -1 means no PHY; otherwise either let mii(4) probe
	 * it, or (when a specific driver name was given) add that driver
	 * as a child device instead.
	 */
	if (priv->phy_id != -1) {
		if (priv->phy_device == NULL) {
			error = mii_attach(dev, &priv->miibus, ifp,
			    octe_mii_medchange, octe_mii_medstat,
			    BMSR_DEFCAPMASK, priv->phy_id, MII_OFFSET_ANY, 0);
			if (error != 0)
				device_printf(dev, "attaching PHYs failed\n");
		} else {
			child = device_add_child(dev, priv->phy_device, -1);
			if (child == NULL)
				device_printf(dev, "missing phy %u device %s\n",
				    priv->phy_id, priv->phy_device);
		}
	}

	/* No MII bus: fall back to a single fixed "auto" media entry. */
	if (priv->miibus == NULL) {
		ifmedia_init(&priv->media, 0, octe_medchange, octe_medstat);

		ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * XXX
	 * We don't support programming the multicast filter right now, although it
	 * ought to be easy enough.  (Presumably it's just a matter of putting
	 * multicast addresses in the CAM?)
	 */
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_ALLMULTI;
	ifp->if_init = octe_init;
	ifp->if_ioctl = octe_ioctl;

	/* Remember the flags we attached with (compared on ioctl later). */
	priv->if_flags = ifp->if_flags;

	mtx_init(&priv->tx_mtx, ifp->if_xname, "octe tx send queue", MTX_DEF);

	/* One free queue (with its own lock) per hardware QoS level. */
	for (qos = 0; qos < 16; qos++) {
		mtx_init(&priv->tx_free_queue[qos].ifq_mtx, ifp->if_xname,
		    "octe tx free queue", MTX_DEF);
		IFQ_SET_MAXLEN(&priv->tx_free_queue[qos],
		    MAX_OUT_QUEUE_DEPTH);
	}

	ether_ifattach(ifp, priv->mac);

	/* if_transmit is set after ether_ifattach (which also sets one). */
	ifp->if_transmit = octe_transmit;

	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP;

	OCTE_TX_LOCK(priv);
	IFQ_SET_MAXLEN(&ifp->if_snd, MAX_OUT_QUEUE_DEPTH);
	ifp->if_snd.ifq_drv_maxlen = MAX_OUT_QUEUE_DEPTH;
	IFQ_SET_READY(&ifp->if_snd);
	OCTE_TX_UNLOCK(priv);

	return (bus_generic_attach(dev));
}
/*
 * t4_cloner_create:
 *
 *	if_clone create handler for the T4 tracer interface.  The clone
 *	name must match an existing adapter (looked up via t4_iterate);
 *	at most one tracer ifnet may exist per adapter, and the adapter
 *	must already have a trace queue configured.  On success the new
 *	ifnet is published in sc->ifp under ifp_lock.
 *
 *	Returns 0 or an errno (ENOENT no such adapter, EEXIST tracer
 *	already present, EAGAIN no trace queue, ENOMEM allocation).
 */
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	struct ifnet *ifp;
	int rc, unit;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	/* Find the adapter whose name matches, taking its op lock. */
	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	/* Only one tracer per adapter. */
	if (sc->ifp != NULL) {
		rc = EEXIST;
		goto done;
	}
	/* The adapter must have a trace queue set up already. */
	if (sc->traceq < 0) {
		rc = EAGAIN;
		goto done;
	}

	unit = -1;
	rc = ifc_alloc_unit(ifc, &unit);
	if (rc != 0)
		goto done;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		rc = ENOMEM;
		goto done;
	}

	/* Note that if_xname is not <if_dname><if_dunit>. */
	strlcpy(ifp->if_xname, name, sizeof(ifp->if_xname));
	ifp->if_dname = t4_cloner_name;
	ifp->if_dunit = unit;
	ifp->if_init = tracer_init;
	ifp->if_flags = IFF_SIMPLEX | IFF_DRV_RUNNING;
	ifp->if_ioctl = tracer_ioctl;
	ifp->if_transmit = tracer_transmit;
	ifp->if_qflush = tracer_qflush;
	ifp->if_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	/* Single fixed media entry; tracer has no real link. */
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);

	/* All-zero link-level address. */
	ether_ifattach(ifp, lla);

	/* Publish the tracer ifnet; readers take ifp_lock. */
	mtx_lock(&sc->ifp_lock);
	ifp->if_softc = sc;
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}
/*
 * ex_attach:
 *
 *	Attach an Intel EtherExpress Pro/10 interface: select the IRQ
 *	mapping tables for the card variant, initialize the ifnet and
 *	ifmedia (media list taken from EEPROM word 5), attach the
 *	Ethernet layer and hook up the interrupt handler.
 *
 *	Returns 0 on success, ENOSPC if if_alloc() fails, or the
 *	bus_setup_intr() error (after undoing the ether attach).
 */
int
ex_attach(device_t dev)
{
	struct ex_softc *	sc = device_get_softc(dev);
	struct ifnet *		ifp;
	struct ifmedia *	ifm;
	int			error;
	uint16_t		temp;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		return (ENOSPC);
	}
	/* work out which set of irq <-> internal tables to use */
	if (ex_card_type(sc->enaddr) == CARD_TYPE_EX_10_PLUS) {
		sc->irq2ee = plus_irq2eemap;
		sc->ee2irq = plus_ee2irqmap;
	} else {
		sc->irq2ee = irq2eemap;
		sc->ee2irq = ee2irqmap;
	}
	sc->mem_size = CARD_RAM_SIZE;	/* XXX This should be read from the card itself. */

	/*
	 * Initialize the ifnet structure.
	 */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_init = ex_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);

	ifmedia_init(&sc->ifmedia, 0, ex_ifmedia_upd, ex_ifmedia_sts);
	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->timer, &sc->lock, 0);

	/* EEPROM word 5 advertises which connectors are present. */
	temp = ex_eeprom_read(sc, EE_W5);
	if (temp & EE_W5_PORT_TPE)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	if (temp & EE_W5_PORT_BNC)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
	if (temp & EE_W5_PORT_AUI)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);

	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
	ifmedia_set(&sc->ifmedia, ex_get_media(sc));

	/* Push the selected medium down to the hardware. */
	ifm = &sc->ifmedia;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ex_ifmedia_upd(ifp);

	/*
	 * Attach the interface.
	 */
	ether_ifattach(ifp, sc->enaddr);

	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
				NULL, ex_intr, (void *)sc, &sc->ih);
	if (error) {
		device_printf(dev, "bus_setup_intr() failed!\n");
		/* Undo the ether attach before bailing out. */
		ether_ifdetach(ifp);
		mtx_destroy(&sc->lock);
		return (error);
	}

	return(0);
}
/*
 * Attach an EPIC interface to the system.
 *
 *	Allocates and maps the DMA control-data area (plus a zeroed pad
 *	buffer used to pad short transmit frames), creates the tx/rx
 *	buffer DMA maps, reads the station address and device name from
 *	the EEPROM, probes the MII and attaches the Ethernet interface.
 *	On any failure the already-acquired DMA resources are released
 *	via the fall-through fail_* labels in reverse order.
 */
void
epic_attach(struct epic_softc *sc, const char *intrstr)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rseg, error, miiflags;
	u_int i;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
	char *nullbuf;

	timeout_set(&sc->sc_mii_timeout, epic_tick, sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  ETHER_PAD_LEN extra bytes are carved off the
	 * end of the same allocation for the transmit pad buffer.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN,
	    (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf = (char *)sc->sc_control_data +
	    sizeof(struct epic_control_data);
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
		printf(": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}

	/* Pad buffer never changes; sync it for device reads once. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM (stored as 16-bit
	 * little-endian words).
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2]     = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name.
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2]     = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/*
	 * Strip trailing spaces: seed the sentinel byte with ' ' so the
	 * loop below NUL-terminates from the end back to the last
	 * non-space character (stopping at index 0).
	 */
	devname[sizeof(devname) - 1] = ' ';
	for (i = sizeof(devname) - 1; devname[i] == ' '; i--) {
		devname[i] = '\0';
		if (i == 0)
			break;
	}

	printf(", %s : %s, address %s\n", devname, intrstr,
	    ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: register a "none" medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->sc_serinst),
		    0, NULL);
	} else
		sc->sc_serinst = -1;

	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, EPIC_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
/*
 * beattach:
 *
 *	Attach a "be" (BigMac) Ethernet channel hanging off a QEC SBus
 *	controller.  Maps the three register sets, establishes the
 *	interrupt, allocates one contiguous DMA area holding both
 *	descriptor rings and all packet buffers, then probes for an
 *	external MII PHY and/or configures the on-board transceiver.
 */
void
beattach(struct device *parent, struct device *self, void *aux)
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = (struct qec_softc *)parent;
	struct be_softc *sc = (struct be_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int instance;
	int rseg, error;
	u_int32_t v;
	extern void myetheraddr(u_char *);

	/* Pass on the bus tags */
	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;

	/* Three register sets expected: control, BigMac, transceiver. */
	if (sa->sa_nreg < 3) {
		printf("%s: only %d register sets\n",
		    self->dv_xname, sa->sa_nreg);
		return;
	}

	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[0].sbr_slot,
	    (bus_addr_t)sa->sa_reg[0].sbr_offset,
	    (bus_size_t)sa->sa_reg[0].sbr_size, 0, 0, &sc->sc_cr) != 0) {
		printf("beattach: cannot map registers\n");
		return;
	}

	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[1].sbr_slot,
	    (bus_addr_t)sa->sa_reg[1].sbr_offset,
	    (bus_size_t)sa->sa_reg[1].sbr_size, 0, 0, &sc->sc_br) != 0) {
		printf("beattach: cannot map registers\n");
		return;
	}

	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[2].sbr_slot,
	    (bus_addr_t)sa->sa_reg[2].sbr_offset,
	    (bus_size_t)sa->sa_reg[2].sbr_size, 0, 0, &sc->sc_tr) != 0) {
		printf("beattach: cannot map registers\n");
		return;
	}

	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_rev = getpropint(node, "board-version", -1);
	printf(" rev %x", sc->sc_rev);

	/* Make sure the hardware is quiescent before going further. */
	bestop(sc);

	sc->sc_channel = getpropint(node, "channel#", -1);
	if (sc->sc_channel == -1)
		sc->sc_channel = 0;

	sc->sc_burst = getpropint(node, "burst-sizes", -1);
	if (sc->sc_burst == -1)
		sc->sc_burst = qec->sc_burst;

	/* Clamp at parent's burst sizes */
	sc->sc_burst &= qec->sc_burst;

	/* Establish interrupt handler */
	if (sa->sa_nintr == 0 || bus_intr_establish(sa->sa_bustag, sa->sa_pri,
	    IPL_NET, 0, beintr, sc, self->dv_xname) == NULL) {
		printf(": no interrupt established\n");
		return;
	}

	myetheraddr(sc->sc_arpcom.ac_enaddr);
	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	/* tx ring + rx ring + tx buffers + rx buffers, one allocation */
	size =
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    sc->sc_rb.rb_ntbuf * BE_PKT_BUF_SZ +
	    sc->sc_rb.rb_nrbuf * BE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: DMA map create error %d\n",
		    self->dv_xname, error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(sa->sa_dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer alloc error %d\n",
		    self->dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(sa->sa_dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map error %d\n",
		    self->dv_xname, error);
		bus_dmamem_free(sa->sa_dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer map load error %d\n",
		    self->dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/*
	 * Initialize our media structures and MII info.
	 * NOTE(review): the ifmedia_add/set calls below use sc->sc_media;
	 * presumably sc_media aliases sc_mii.mii_media (initialized here)
	 * — confirm against the softc definition.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = be_mii_readreg;
	mii->mii_writereg = be_mii_writereg;
	mii->mii_statchg = be_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, be_ifmedia_upd, be_ifmedia_sts);

	timeout_set(&sc->sc_tick_ch, be_tick, sc);

	/*
	 * Initialize transceiver and determine which PHY connection to use.
	 */
	be_mii_sync(sc);
	v = bus_space_read_4(sc->sc_bustag, sc->sc_tr, BE_TRI_MGMTPAL);

	instance = 0;

	if ((v & MGMT_PAL_EXT_MDIO) != 0) {
		/* External MDIO pins present: probe for an external PHY. */
		mii_attach(&sc->sc_dev, mii, 0xffffffff, BE_PHY_EXTERNAL,
		    MII_OFFSET_ANY, 0);

		child = LIST_FIRST(&mii->mii_phys);
		if (child == NULL) {
			/* No PHY attached */
			ifmedia_add(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER,IFM_NONE,0,instance),
			    0, NULL);
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER,IFM_NONE,0,instance));
		} else {
			/*
			 * Note: we support just one PHY on the external
			 * MII connector.
			 */
#ifdef DIAGNOSTIC
			if (LIST_NEXT(child, mii_list) != NULL) {
				printf("%s: spurious MII device %s attached\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname);
			}
#endif
			if (child->mii_phy != BE_PHY_EXTERNAL ||
			    child->mii_inst > 0) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
			} else {
				sc->sc_phys[instance] = child->mii_phy;
			}

			/*
			 * XXX - we can really do the following ONLY if the
			 * phy indeed has the auto negotiation capability!!
			 */
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,instance));

			/* Mark our current media setting */
			be_pal_gate(sc, BE_PHY_EXTERNAL);
			instance++;
		}
	}

	if ((v & MGMT_PAL_INT_MDIO) != 0) {
		/*
		 * The be internal phy looks vaguely like MII hardware,
		 * but not enough to be able to use the MII device
		 * layer. Hence, we have to take care of media selection
		 * ourselves.
		 */
		sc->sc_mii_inst = instance;
		sc->sc_phys[instance] = BE_PHY_INTERNAL;

		/* Use `ifm_data' to store BMCR bits */
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,instance), 0, NULL);
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER,IFM_100_TX,0,instance),
		    BMCR_S100, NULL);
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,instance), 0, NULL);

		printf("on-board transceiver at %s: 10baseT, 100baseTX, auto\n",
		    self->dv_xname);

		be_mii_reset(sc, BE_PHY_INTERNAL);
		/* Only set default medium here if there's no external PHY */
		if (instance == 0) {
			be_pal_gate(sc, BE_PHY_INTERNAL);
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,instance));
		} else
			/* Isolate the internal PHY; external one is in use. */
			be_mii_writereg((void *)sc,
			    BE_PHY_INTERNAL, MII_BMCR, BMCR_ISO);
	}

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = bestart;
	ifp->if_ioctl = beioctl;
	ifp->if_watchdog = bewatchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);
}
/*
 * qeattach:
 *
 *	Attach a "qe" (MACE) Ethernet channel hanging off a QEC SBus
 *	controller.  Maps the two register sets, establishes the
 *	interrupt, allocates one contiguous DMA area holding both
 *	descriptor rings and all packet buffers, then registers a fixed
 *	media list (10baseT / AUI / auto) and attaches the interface.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = device_private(parent);
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;

	sc->sc_dev = self;

	/* Two register sets expected: channel control and MACE. */
	if (sa->sa_nreg < 2) {
		printf("%s: only %d register sets\n",
		    device_xname(self), sa->sa_nreg);
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(
		sa->sa_reg[0].oa_space,
		sa->sa_reg[0].oa_base),
	    (bus_size_t)sa->sa_reg[0].oa_size,
	    0, &sc->sc_cr) != 0) {
		aprint_error_dev(self, "cannot map registers\n");
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(
		sa->sa_reg[1].oa_space,
		sa->sa_reg[1].oa_base),
	    (bus_size_t)sa->sa_reg[1].oa_size,
	    0, &sc->sc_mr) != 0) {
		aprint_error_dev(self, "cannot map registers\n");
		return;
	}

	sc->sc_rev = prom_getpropint(node, "mace-version", -1);
	printf(" rev %x", sc->sc_rev);

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;
	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_channel = prom_getpropint(node, "channel#", -1);
	sc->sc_burst = qec->sc_burst;

	/* Make sure the hardware is quiescent before going further. */
	qestop(sc);

	/* Note: no interrupt level passed */
	(void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, qeintr, sc);
	prom_getether(node, sc->sc_enaddr);

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	/* tx ring + rx ring + tx buffers + rx buffers, one allocation */
	size =
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
	    sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(self, "DMA map create error %d\n", error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer alloc error %d\n", error);
		return;
	}

	/* Map DMA buffer in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "DMA buffer map error %d\n", error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer map load error %d\n",
		    error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/* Initialize media properties */
	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0), 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qewatchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
}
/*
 * ep_attach:
 *
 *	Attach a 3Com 3c5x9 (EtherLink III) interface: program the
 *	station address into window 2 of the ASIC, initialize the ifnet,
 *	and (for non-MII cards) build the ifmedia list from the
 *	connectors the card reports.  Always returns 0.
 */
int
ep_attach(struct ep_softc *sc)
{
	struct ifnet *	ifp = NULL;
	struct ifmedia *	ifm = NULL;
	u_short *		p;
	uint8_t			ether_addr[ETHER_ADDR_LEN];
	int			i;

	sc->gone = 0;
	ep_get_macaddr(sc, ether_addr);

	/*
	 * Setup the station address
	 */
	p = (u_short*)ether_addr;
	GO_WINDOW(2);
	for (i = 0; i < 3; i++) {
		/* Address registers are written as big-endian words. */
		outw(BASE + EP_W2_ADDR_0 + (i * 2), ntohs(p[i]));
	}

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = ep_if_start;
	ifp->if_ioctl = ep_if_ioctl;
	ifp->if_watchdog = ep_if_watchdog;
	ifp->if_init = ep_if_init;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	/* MII-transceiver cards manage media elsewhere. */
	if (!sc->epb.mii_trans) {
		ifmedia_init(&sc->ifmedia, 0, ep_ifmedia_upd, ep_ifmedia_sts);

		if (sc->ep_connectors & AUI)
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
		if (sc->ep_connectors & UTP)
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		if (sc->ep_connectors & BNC)
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
		if (!sc->ep_connectors)
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);

		/* Default to whatever connector the EEPROM selected. */
		ifmedia_set(&sc->ifmedia,
		    IFM_ETHER|ep_media2if_media[sc->ep_connector]);

		ifm = &sc->ifmedia;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		ep_ifmedia_upd(ifp);
	}

	ether_ifattach(ifp, ether_addr, NULL);

#ifdef EP_LOCAL_STATS
	sc->rx_no_first = sc->rx_no_mbuf = sc->rx_bpf_disc =
	    sc->rx_overrunf = sc->rx_overrunl = sc->tx_underrun = 0;
#endif
	EP_FSET(sc, F_RX_FIRST);
	sc->top = sc->mcur = 0;

	return 0;
}
/*
 * Setup the media data structures according to the channel and
 * rate tables.  This must be called by the driver after
 * ieee80211_attach and before most anything else.
 *
 * For every supported PHY mode this registers an "auto" entry plus one
 * entry per supported rate, each also replicated for the operating
 * modes the device is capable of (IBSS/hostap/adhoc/monitor).  Rates
 * common to all modes are added once more without a mode option, and
 * if_baudrate is set from the fastest rate seen.
 */
void
ieee80211_media_init(struct ifnet *ifp,
    ifm_change_cb_t media_change, ifm_stat_cb_t media_stat)
{
#define	ADD(_ic, _s, _o) \
	ifmedia_add(&(_ic)->ic_media, \
	    IFM_MAKEWORD(IFM_IEEE80211, (_s), (_o), 0), 0, NULL)
	struct ieee80211com *ic = (void *)ifp;
	struct ifmediareq imr;
	int i, j, mode, rate, maxrate, r;
	uint64_t mword, mopt;
	const struct ieee80211_rateset *rs;
	struct ieee80211_rateset allrates;

	/*
	 * Do late attach work that must wait for any subclass
	 * (i.e. driver) work such as overriding methods.
	 */
	ieee80211_node_lateattach(ifp);

	/*
	 * Fill in media characteristics.
	 */
	ifmedia_init(&ic->ic_media, 0, media_change, media_stat);
	maxrate = 0;
	memset(&allrates, 0, sizeof(allrates));
	for (mode = IEEE80211_MODE_AUTO; mode < IEEE80211_MODE_MAX; mode++) {
		/* Media option word for each PHY mode, indexed by mode. */
		static const uint64_t mopts[] = {
			IFM_AUTO,
			IFM_IEEE80211_11A,
			IFM_IEEE80211_11B,
			IFM_IEEE80211_11G,
			IFM_IEEE80211_11A | IFM_IEEE80211_TURBO,
		};
		if ((ic->ic_modecaps & (1<<mode)) == 0)
			continue;
		mopt = mopts[mode];
		ADD(ic, IFM_AUTO, mopt);	/* e.g. 11a auto */
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_caps & IEEE80211_C_IBSS)
			ADD(ic, IFM_AUTO, mopt | IFM_IEEE80211_IBSS);
		if (ic->ic_caps & IEEE80211_C_HOSTAP)
			ADD(ic, IFM_AUTO, mopt | IFM_IEEE80211_HOSTAP);
		if (ic->ic_caps & IEEE80211_C_AHDEMO)
			ADD(ic, IFM_AUTO, mopt | IFM_IEEE80211_ADHOC);
#endif
		if (ic->ic_caps & IEEE80211_C_MONITOR)
			ADD(ic, IFM_AUTO, mopt | IFM_IEEE80211_MONITOR);
		if (mode == IEEE80211_MODE_AUTO)
			continue;
		rs = &ic->ic_sup_rates[mode];
		for (i = 0; i < rs->rs_nrates; i++) {
			rate = rs->rs_rates[i];
			mword = ieee80211_rate2media(ic, rate, mode);
			if (mword == 0)
				continue;
			ADD(ic, mword, mopt);
#ifndef IEEE80211_STA_ONLY
			if (ic->ic_caps & IEEE80211_C_IBSS)
				ADD(ic, mword, mopt | IFM_IEEE80211_IBSS);
			if (ic->ic_caps & IEEE80211_C_HOSTAP)
				ADD(ic, mword, mopt | IFM_IEEE80211_HOSTAP);
			if (ic->ic_caps & IEEE80211_C_AHDEMO)
				ADD(ic, mword, mopt | IFM_IEEE80211_ADHOC);
#endif
			if (ic->ic_caps & IEEE80211_C_MONITOR)
				ADD(ic, mword, mopt | IFM_IEEE80211_MONITOR);

			/*
			 * Add rate to the collection of all rates.
			 * Membership test is on the rate value with the
			 * "basic rate" flag masked off.
			 */
			r = rate & IEEE80211_RATE_VAL;
			for (j = 0; j < allrates.rs_nrates; j++)
				if (allrates.rs_rates[j] == r)
					break;
			if (j == allrates.rs_nrates) {
				/* unique, add to the set */
				allrates.rs_rates[j] = r;
				allrates.rs_nrates++;
			}
			/* Rates are in 500 kb/s units; /2 gives Mb/s. */
			rate = (rate & IEEE80211_RATE_VAL) / 2;
			if (rate > maxrate)
				maxrate = rate;
		}
	}
	/* Mode-independent entries for rates shared by all modes. */
	for (i = 0; i < allrates.rs_nrates; i++) {
		mword = ieee80211_rate2media(ic, allrates.rs_rates[i],
		    IEEE80211_MODE_AUTO);
		if (mword == 0)
			continue;
		mword = IFM_SUBTYPE(mword);	/* remove media options */
		ADD(ic, mword, 0);
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_caps & IEEE80211_C_IBSS)
			ADD(ic, mword, IFM_IEEE80211_IBSS);
		if (ic->ic_caps & IEEE80211_C_HOSTAP)
			ADD(ic, mword, IFM_IEEE80211_HOSTAP);
		if (ic->ic_caps & IEEE80211_C_AHDEMO)
			ADD(ic, mword, IFM_IEEE80211_ADHOC);
#endif
		if (ic->ic_caps & IEEE80211_C_MONITOR)
			ADD(ic, mword, IFM_IEEE80211_MONITOR);
	}

	/* Set the current media to whatever is active right now. */
	ieee80211_media_status(ifp, &imr);
	ifmedia_set(&ic->ic_media, imr.ifm_active);

	if (maxrate)
		ifp->if_baudrate = IF_Mbps(maxrate);
#undef ADD
}
/*
 * vte_attach:
 *
 *	Attach an RDC R6040 (vte) PCI Fast Ethernet controller: map the
 *	memory BAR, establish the interrupt, reset the chip, allocate
 *	DMA resources, read the station address and probe the MII.
 *	On failure after the BAR is mapped, vte_detach() unwinds.
 */
void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up MII bus.
	 * BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes so driver may be able to extract
	 * configured PHY address.  Since it's common to see BIOS
	 * fails to initialize the register(including the sample
	 * board I have), let mii(4) probe it.  This is more
	 * reliable than relying on BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	/* Unwind everything vte_attach managed to set up. */
	vte_detach(&sc->sc_dev, 0);
}
/*
 * dme_attach:
 *
 *	Attach a Davicom DM9000 Ethernet controller: verify the chip's
 *	product ID, initialize the ifnet and a fixed ifmedia list,
 *	program the MAC address into the chip, attach the interface and
 *	select the packet-access width from the chip's reported I/O
 *	mode.  Panics on product-ID mismatch or unsupported I/O mode.
 *	Always returns 0.
 */
int
dme_attach(struct dme_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t b[2];
	uint16_t io_mode;

	/*
	 * Read vendor/product ID bytes.
	 * NOTE(review): the combination below is conditional on host
	 * byte order even though b[0]/b[1] are read as individual
	 * bytes — presumably matching how the 0x9000 check is spelled
	 * on each platform; confirm against the chip's ID layout.
	 */
	dme_read_c(sc, DM9000_VID0, b, 2);
#if BYTE_ORDER == BIG_ENDIAN
	sc->sc_vendor_id = (b[0] << 8) | b[1];
#else
	sc->sc_vendor_id = b[0] | (b[1] << 8);
#endif
	dme_read_c(sc, DM9000_PID0, b, 2);
#if BYTE_ORDER == BIG_ENDIAN
	sc->sc_product_id = (b[0] << 8) | b[1];
#else
	sc->sc_product_id = b[0] | (b[1] << 8);
#endif
	/* TODO: Check the vendor ID as well */
	if (sc->sc_product_id != 0x9000) {
		panic("dme_attach: product id mismatch (0x%hx != 0x9000)",
		    sc->sc_product_id);
	}

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = dme_start_output;
	ifp->if_init = dme_init;
	ifp->if_ioctl = dme_ioctl;
	ifp->if_stop = dme_stop;
	ifp->if_watchdog = NULL;	/* no watchdog at this stage */
	ifp->if_flags = IFF_SIMPLEX | IFF_NOTRAILERS | IFF_BROADCAST |
	    IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, 0, dme_mediachange, dme_mediastatus);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if (enaddr != NULL)
		memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));
	/* TODO: Support an EEPROM attached to the DM9000 chip */

	/* Periodic PHY link check. */
	callout_init(&sc->sc_link_callout, 0);
	callout_setfunc(&sc->sc_link_callout, dme_phy_check_link, sc);
	sc->sc_media_status = 0;

	/* Configure DM9000 with the MAC address */
	dme_write_c(sc, DM9000_PAB0, sc->sc_enaddr, 6);

#ifdef DM9000_DEBUG
	{
		uint8_t macAddr[6];
		dme_read_c(sc, DM9000_PAB0, macAddr, 6);
		printf("DM9000 configured with MAC address: ");
		for (int i = 0; i < 6; i++) {
			printf("%02X:", macAddr[i]);
		}
		printf("\n");
	}
#endif

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

#ifdef DM9000_DEBUG
	{
		uint8_t network_state;
		network_state = dme_read(sc, DM9000_NSR);
		printf("DM9000 Link status: ");
		if (network_state & DM9000_NSR_LINKST) {
			if (network_state & DM9000_NSR_SPEED)
				printf("10Mbps");
			else
				printf("100Mbps");
		} else {
			printf("Down");
		}
		printf("\n");
	}
#endif

	/* The I/O-mode strap bits are reported in the ISR register. */
	io_mode = (dme_read(sc, DM9000_ISR) &
	    DM9000_IOMODE_MASK) >> DM9000_IOMODE_SHIFT;

	if (io_mode != DM9000_MODE_16BIT )
		panic("DM9000: Only 16-bit mode is supported!\n");

	DPRINTF(("DM9000 Operation Mode: "));
	switch( io_mode) {
	case DM9000_MODE_16BIT:
		DPRINTF(("16-bit mode"));
		sc->sc_data_width = 2;
		sc->sc_pkt_write = dme_pkt_write_2;
		sc->sc_pkt_read = dme_pkt_read_2;
		break;
	case DM9000_MODE_32BIT:
		/* Unreachable: the panic above rejects non-16-bit modes. */
		DPRINTF(("32-bit mode"));
		sc->sc_data_width = 4;
		break;
	case DM9000_MODE_8BIT:
		/* Unreachable: the panic above rejects non-16-bit modes. */
		DPRINTF(("8-bit mode"));
		sc->sc_data_width = 1;
		break;
	default:
		DPRINTF(("Invalid mode"));
		break;
	}
	DPRINTF(("\n"));

	callout_schedule(&sc->sc_link_callout, mstohz(2000));

	return 0;
}
/*
 * Attach the TI CPSW (Common Platform Switch) Ethernet controller.
 *
 * Establishes the four controller interrupts, maps the register space
 * and the CPPI descriptor SRAM, allocates DMA maps for the TX/RX rings
 * and the TX padding buffer, sets up the ifnet, attaches the MII PHYs,
 * and registers the interface.  On a fatal setup error it prints a
 * message and returns without attaching.
 */
void
cpsw_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpsw_softc *sc = (struct cpsw_softc *)self;
	struct armv7_attach_args *aa = aux;
	struct arpcom * const ac = &sc->sc_ac;
	struct ifnet * const ifp = &ac->ac_if;
	u_int32_t idver;
	int error;
	u_int i;

	timeout_set(&sc->sc_tick, cpsw_tick, sc);

	cpsw_get_mac_addr(sc);

	/* One interrupt line per event class: RX threshold, RX, TX, misc. */
	sc->sc_rxthih = arm_intr_establish(aa->aa_dev->irq[0] +
	    CPSW_INTROFF_RXTH, IPL_NET, cpsw_rxthintr, sc, DEVNAME(sc));
	sc->sc_rxih = arm_intr_establish(aa->aa_dev->irq[0] +
	    CPSW_INTROFF_RX, IPL_NET, cpsw_rxintr, sc, DEVNAME(sc));
	sc->sc_txih = arm_intr_establish(aa->aa_dev->irq[0] +
	    CPSW_INTROFF_TX, IPL_NET, cpsw_txintr, sc, DEVNAME(sc));
	sc->sc_miscih = arm_intr_establish(aa->aa_dev->irq[0] +
	    CPSW_INTROFF_MISC, IPL_NET, cpsw_miscintr, sc, DEVNAME(sc));

	sc->sc_bst = aa->aa_iot;
	sc->sc_bdt = aa->aa_dmat;

	error = bus_space_map(sc->sc_bst, aa->aa_dev->mem[0].addr,
	    aa->aa_dev->mem[0].size, 0, &sc->sc_bsh);
	if (error) {
		printf("can't map registers: %d\n", error);
		return;
	}

	/*
	 * The TX and RX descriptor rings live in on-chip CPPI SRAM;
	 * record both the bus_space subregions and the physical
	 * addresses the DMA engine must be programmed with.
	 */
	sc->sc_txdescs_pa = aa->aa_dev->mem[0].addr +
	    CPSW_CPPI_RAM_TXDESCS_BASE;
	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
	    CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
	    &sc->sc_bsh_txdescs);
	if (error) {
		printf("can't subregion tx ring SRAM: %d\n", error);
		return;
	}

	sc->sc_rxdescs_pa = aa->aa_dev->mem[0].addr +
	    CPSW_CPPI_RAM_RXDESCS_BASE;
	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
	    CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
	    &sc->sc_bsh_rxdescs);
	if (error) {
		printf("can't subregion rx ring SRAM: %d\n", error);
		return;
	}

	sc->sc_rdp = malloc(sizeof(*sc->sc_rdp), M_TEMP, M_WAITOK);
	KASSERT(sc->sc_rdp != NULL);

	/* Per-slot DMA maps for TX (multi-fragment) and RX (one segment). */
	for (i = 0; i < CPSW_NTXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
		    CPSW_TXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_rdp->tx_dm[i])) != 0) {
			printf("unable to create tx DMA map: %d\n", error);
		}
		sc->sc_rdp->tx_mb[i] = NULL;
	}
	for (i = 0; i < CPSW_NRXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
			printf("unable to create rx DMA map: %d\n", error);
		}
		sc->sc_rdp->rx_mb[i] = NULL;
	}

	/*
	 * Pre-load a zeroed padding buffer used to pad short frames up
	 * to the Ethernet minimum; synced for write once, here.
	 */
	sc->sc_txpad = dma_alloc(ETHER_MIN_LEN, PR_WAITOK | PR_ZERO);
	KASSERT(sc->sc_txpad != NULL);
	bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
	    BUS_DMA_WAITOK, &sc->sc_txpad_dm);
	bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
	    ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK|BUS_DMA_WRITE);
	bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
	    BUS_DMASYNC_PREWRITE);

	idver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_IDVER);
	printf(": version %d.%d (%d), address %s\n",
	    CPSW_SS_IDVER_MAJ(idver), CPSW_SS_IDVER_MIN(idver),
	    CPSW_SS_IDVER_RTL(idver), ether_sprintf(ac->ac_enaddr));

	ifp->if_softc = sc;
	ifp->if_capabilities = 0;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cpsw_start;
	ifp->if_ioctl = cpsw_ioctl;
	ifp->if_watchdog = cpsw_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, CPSW_NTXDESCS - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	/* Put the hardware in a known-quiescent state before MII attach. */
	cpsw_stop(ifp);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = cpsw_mii_readreg;
	sc->sc_mii.mii_writereg = cpsw_mii_writereg;
	sc->sc_mii.mii_statchg = cpsw_mii_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, cpsw_mediachange,
	    cpsw_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY probed: fall back to a manual media entry. */
		printf("no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	if_attach(ifp);
	ether_ifattach(ifp);

	return;
}
/*
 * Attach the Apple BMAC Ethernet controller (OpenBSD/macppc variant).
 *
 * Maps the register and DBDMA areas, fetches the MAC address from
 * OpenFirmware, allocates and loads a single DMA buffer region shared
 * by the TX and RX rings, hooks up interrupts, and attaches the
 * interface with MII media support.  On error it unwinds any DMA
 * resources already acquired and returns without attaching.
 */
void
bmac_attach(struct device *parent, struct device *self, void *aux)
{
	struct confargs *ca = aux;
	struct bmac_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	u_char laddr[6];
	int nseg, error;

	timeout_set(&sc->sc_tick_ch, bmac_mii_tick, sc);

	sc->sc_flags =0;
	/* The "ethernet" OF node denotes the newer BMAC+ variant. */
	if (strcmp(ca->ca_name, "ethernet") == 0) {
		sc->sc_flags |= BMAC_BMACPLUS;
	}

	/* Register offsets in the OF reg property are base-relative. */
	ca->ca_reg[0] += ca->ca_baseaddr;
	ca->ca_reg[2] += ca->ca_baseaddr;
	ca->ca_reg[4] += ca->ca_baseaddr;

	sc->sc_regs = (vaddr_t)mapiodev(ca->ca_reg[0], NBPG);

	/* Mask all chip interrupts until the interface is brought up. */
	bmac_write_reg(sc, INTDISABLE, NoEventsMask);

	if (OF_getprop(ca->ca_node, "local-mac-address", laddr, 6) == -1 &&
	    OF_getprop(ca->ca_node, "mac-address", laddr, 6) == -1) {
		printf(": cannot get mac-address\n");
		return;
	}
	bcopy(laddr, sc->arpcom.ac_enaddr, 6);

	sc->sc_dmat = ca->ca_dmat;
	/* Map the TX and RX DBDMA channel registers. */
	sc->sc_txdma = mapiodev(ca->ca_reg[2], 0x100);
	sc->sc_rxdma = mapiodev(ca->ca_reg[4], 0x100);
	sc->sc_txdbdma = dbdma_alloc(sc->sc_dmat, BMAC_TXBUFS);
	sc->sc_txcmd = sc->sc_txdbdma->d_addr;
	/* One extra RX command slot for the ring-terminating command. */
	sc->sc_rxdbdma = dbdma_alloc(sc->sc_dmat, BMAC_RXBUFS + 1);
	sc->sc_rxcmd = sc->sc_rxdbdma->d_addr;

	/*
	 * Allocate one contiguous buffer region; TX buffers come first,
	 * RX buffers follow (split below after the load succeeds).
	 */
	error = bus_dmamem_alloc(sc->sc_dmat, BMAC_BUFSZ,
	    PAGE_SIZE, 0, sc->sc_bufseg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot allocate buffers (%d)\n", error);
		return;
	}

	error = bus_dmamem_map(sc->sc_dmat, sc->sc_bufseg, nseg,
	    BMAC_BUFSZ, &sc->sc_txbuf, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot map buffers (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->sc_bufseg, 1);
		return;
	}

	error = bus_dmamap_create(sc->sc_dmat, BMAC_BUFSZ, 1, BMAC_BUFSZ, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_bufmap);
	if (error) {
		printf(": cannot create buffer dmamap (%d)\n", error);
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_txbuf, BMAC_BUFSZ);
		bus_dmamem_free(sc->sc_dmat, sc->sc_bufseg, 1);
		return;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_bufmap, sc->sc_txbuf,
	    BMAC_BUFSZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot load buffers dmamap (%d)\n", error);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufmap);
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_txbuf, BMAC_BUFSZ);
		bus_dmamem_free(sc->sc_dmat, sc->sc_bufseg, nseg);
		return;
	}

	/* Derive the RX half of the buffer region from the TX base. */
	sc->sc_txbuf_pa = sc->sc_bufmap->dm_segs->ds_addr;
	sc->sc_rxbuf = sc->sc_txbuf + BMAC_BUFLEN * BMAC_TXBUFS;
	sc->sc_rxbuf_pa = sc->sc_txbuf_pa + BMAC_BUFLEN * BMAC_TXBUFS;

	printf(" irq %d,%d: address %s\n", ca->ca_intr[0], ca->ca_intr[2],
	    ether_sprintf(laddr));

	/* Separate interrupt lines for general chip events and RX. */
	mac_intr_establish(parent, ca->ca_intr[0], IST_LEVEL, IPL_NET,
	    bmac_intr, sc, "bmac intr");
	mac_intr_establish(parent, ca->ca_intr[2], IST_LEVEL, IPL_NET,
	    bmac_rint, sc, "bmac rint");

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_ioctl = bmac_ioctl;
	ifp->if_start = bmac_start;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_watchdog = bmac_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	mii->mii_ifp = ifp;
	mii->mii_readreg = bmac_mii_readreg;
	mii->mii_writereg = bmac_mii_writereg;
	mii->mii_statchg = bmac_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, bmac_mediachange, bmac_mediastatus);
	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* Choose a default media. */
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found: fall back to fixed 10baseT. */
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_10_T);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);

	bmac_reset_chip(sc);

	if_attach(ifp);
	ether_ifattach(ifp);
}
/*
 * Attach an Intel i82596 (IEE) Ethernet controller.
 *
 * Lays out the shared-memory DMA area (SCP, ISCP, SCB, RFDs, RBDs,
 * CBs, TBDs) on cache-line boundaries, allocates/maps/loads it,
 * seeds the inter-structure physical pointers, sets up ifmedia and
 * the ifnet, and attaches the interface.  Finishes by calling the
 * bus front-end's reset hook.
 *
 * sc       - driver softc (sc_cl_align, sc_dmat etc. set by front-end)
 * eth_addr - station address
 * media    - optional list of supported ifmedia words (NULL for none)
 * nmedia   - number of entries in media[]
 * defmedia - default media word when media != NULL
 */
void
iee_attach(struct iee_softc *sc, uint8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int n;

	KASSERT(sc->sc_cl_align > 0 && powerof2(sc->sc_cl_align));

	/*
	 * Calculate DMA descriptor offsets and sizes in shmem
	 * which should be cache line aligned.
	 */
	sc->sc_scp_off	= 0;
	sc->sc_scp_sz	= roundup2(sizeof(struct iee_scp), sc->sc_cl_align);
	sc->sc_iscp_off = sc->sc_scp_sz;
	sc->sc_iscp_sz	= roundup2(sizeof(struct iee_iscp), sc->sc_cl_align);
	sc->sc_scb_off	= sc->sc_iscp_off + sc->sc_iscp_sz;
	sc->sc_scb_sz	= roundup2(sizeof(struct iee_scb), sc->sc_cl_align);
	sc->sc_rfd_off	= sc->sc_scb_off + sc->sc_scb_sz;
	sc->sc_rfd_sz	= roundup2(sizeof(struct iee_rfd), sc->sc_cl_align);
	sc->sc_rbd_off	= sc->sc_rfd_off + sc->sc_rfd_sz * IEE_NRFD;
	sc->sc_rbd_sz	= roundup2(sizeof(struct iee_rbd), sc->sc_cl_align);
	sc->sc_cb_off	= sc->sc_rbd_off + sc->sc_rbd_sz * IEE_NRFD;
	sc->sc_cb_sz	= roundup2(sizeof(struct iee_cb), sc->sc_cl_align);
	sc->sc_tbd_off	= sc->sc_cb_off + sc->sc_cb_sz * IEE_NCB;
	sc->sc_tbd_sz	= roundup2(sizeof(struct iee_tbd), sc->sc_cl_align);
	/* Total size: everything above plus IEE_NTBD TBDs per CB. */
	sc->sc_shmem_sz = sc->sc_tbd_off + sc->sc_tbd_sz * IEE_NTBD * IEE_NCB;

	/* allocate memory for shared DMA descriptors */
	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_shmem_sz, PAGE_SIZE, 0,
	    &sc->sc_dma_segs, 1, &sc->sc_dma_rsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error(": can't allocate %d bytes of DMA memory\n",
		    sc->sc_shmem_sz);
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_segs, sc->sc_dma_rsegs,
	    sc->sc_shmem_sz, (void **)&sc->sc_shmem_addr,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT) != 0) {
		aprint_error(": can't map DMA memory\n");
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
		    sc->sc_dma_rsegs);
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sc->sc_shmem_sz, sc->sc_dma_rsegs,
	    sc->sc_shmem_sz, 0, BUS_DMA_NOWAIT, &sc->sc_shmem_map) != 0) {
		aprint_error(": can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr,
		    sc->sc_shmem_sz);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
		    sc->sc_dma_rsegs);
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_shmem_map, sc->sc_shmem_addr,
	    sc->sc_shmem_sz, NULL, BUS_DMA_NOWAIT) != 0) {
		aprint_error(": can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_shmem_map);
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr,
		    sc->sc_shmem_sz);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
		    sc->sc_dma_rsegs);
		return;
	}
	memset(sc->sc_shmem_addr, 0, sc->sc_shmem_sz);

	/* Set pointer to Intermediate System Configuration Pointer. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_SCP(sc)->scp_iscp_addr = IEE_SWAP32(IEE_PHYS_SHMEM(sc->sc_iscp_off));
	SC_SCP(sc)->scp_sysbus = sc->sc_sysbus;
	/* Set pointer to System Control Block. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_ISCP(sc)->iscp_scb_addr = IEE_SWAP32(IEE_PHYS_SHMEM(sc->sc_scb_off));
	/* Set pointer to Receive Frame Area. (physical address) */
	SC_SCB(sc)->scb_rfa_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off));
	/* Set pointer to Command Block. (physical address) */
	SC_SCB(sc)->scb_cmd_blk_addr =
	    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_cb_off));

	/* Flush the freshly-initialized descriptors to memory. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, sc->sc_shmem_sz,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
	if (media != NULL) {
		for (n = 0 ; n < nmedia ; n++)
			ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, defmedia);
	} else {
		/* No media list supplied: expose a single "none" entry. */
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
	}

	ifp->if_softc = sc;
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = iee_start;	/* initiate output routine */
	ifp->if_ioctl = iee_ioctl;	/* ioctl routine */
	ifp->if_init = iee_init;	/* init routine */
	ifp->if_stop = iee_stop;	/* stop routine */
	ifp->if_watchdog = iee_watchdog;	/* timer routine */
	IFQ_SET_READY(&ifp->if_snd);
	/* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, eth_addr);

	aprint_normal(": Intel 82596%s address %s\n",
	    i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));

	/* Start with no mbufs or DMA maps cached for the rings. */
	for (n = 0 ; n < IEE_NCB ; n++)
		sc->sc_tx_map[n] = NULL;
	for (n = 0 ; n < IEE_NRFD ; n++) {
		sc->sc_rx_mbuf[n] = NULL;
		sc->sc_rx_map[n] = NULL;
	}
	sc->sc_tx_timeout = 0;
	sc->sc_setup_timeout = 0;
	/* Bus front-end hook: put the chip in a known state. */
	(sc->sc_iee_reset)(sc);
}
/*
 * Attach a tap(4) pseudo Ethernet device.
 *
 * Generates a locally-administered MAC address, initializes the locks
 * protecting the read path and the kqueue listener list, sets up the
 * fake ifmedia list, attaches the Ethernet interface, and (when
 * COMPAT_40/MODULAR) creates the per-interface sysctl node.
 */
void
tap_attach(device_t parent, device_t self, void *aux)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp;
#if defined(COMPAT_40) || defined(MODULAR)
	const struct sysctlnode *node;
	int error;
#endif
	/* f2:0b:a4 prefix with the low three bytes randomized below. */
	uint8_t enaddr[ETHER_ADDR_LEN] =
	    { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff };
	char enaddrstr[3 * ETHER_ADDR_LEN];

	sc->sc_dev = self;
	sc->sc_sih = NULL;
	getnanotime(&sc->sc_btime);
	sc->sc_atime = sc->sc_mtime = sc->sc_btime;
	sc->sc_flags = 0;
	selinit(&sc->sc_rsel);

	/*
	 * Initialize the two locks for the device.
	 *
	 * We need a lock here because even though the tap device can be
	 * opened only once, the file descriptor might be passed to another
	 * process, say a fork(2)ed child.
	 *
	 * The Giant saves us from most of the hassle, but since the read
	 * operation can sleep, we don't want two processes to wake up at
	 * the same moment and both try and dequeue a single packet.
	 *
	 * The queue for event listeners (used by kqueue(9), see below) has
	 * to be protected too, so use a spin lock.
	 */
	mutex_init(&sc->sc_rdlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_kqlock, MUTEX_DEFAULT, IPL_VM);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * In order to obtain unique initial Ethernet address on a host,
	 * do some randomisation.  It's not meant for anything but avoiding
	 * hard-coding an address.
	 */
	cprng_fast(&enaddr[3], 3);

	aprint_verbose_dev(self, "Ethernet address %s\n",
	    ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr));

	/*
	 * Why 1000baseT? Why not? You can add more.
	 *
	 * Note that there are 3 steps: init, one or several additions to
	 * list of supported media, and in the end, the selection of one
	 * of them.
	 */
	ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO);

	/*
	 * One should note that an interface must do multicast in order
	 * to support IPv6.
	 */
	ifp = &sc->sc_ec.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = tap_ioctl;
	ifp->if_start = tap_start;
	ifp->if_stop = tap_stop;
	ifp->if_init = tap_init;
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Those steps are mandatory for an Ethernet driver. */
	if_initialize(ifp);
	ether_ifattach(ifp, enaddr);
	if_register(ifp);

#if defined(COMPAT_40) || defined(MODULAR)
	/*
	 * Add a sysctl node for that interface.
	 *
	 * The pointer transmitted is not a string, but instead a pointer to
	 * the softc structure, which we can use to build the string value on
	 * the fly in the helper function of the node.  See the comments for
	 * tap_sysctl_handler for details.
	 *
	 * Usually sysctl_createv is called with CTL_CREATE as the before-last
	 * component.  However, we can allocate a number ourselves, as we are
	 * the only consumer of the net.link.<iface> node.  In this case, the
	 * unit number is conveniently used to number the node.  CTL_CREATE
	 * would just work, too.
	 */
	if ((error = sysctl_createv(NULL, 0, NULL, &node,
	    CTLFLAG_READWRITE, CTLTYPE_STRING, device_xname(self), NULL,
	    tap_sysctl_handler, 0, (void *)sc, 18, CTL_NET, AF_LINK,
	    tap_node, device_unit(sc->sc_dev), CTL_EOL)) != 0)
		aprint_error_dev(self,
		    "sysctl_createv returned %d, ignoring\n", error);
#endif
}
/*
 * Attach the Apple BMAC Ethernet controller (NetBSD/macppc variant).
 *
 * Maps registers via bus_space, fetches the MAC address from
 * OpenFirmware, allocates DBDMA command rings and TX/RX buffers,
 * establishes interrupts, and attaches the interface with MII media
 * support.  Prints a message and returns without attaching on error.
 */
void
bmac_attach(struct device *parent, struct device *self, void *aux)
{
	struct confargs *ca = aux;
	struct bmac_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->sc_if;
	struct mii_data *mii = &sc->sc_mii;
	u_char laddr[6];

	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_flags =0;
	/*
	 * The "ethernet" OF node denotes the BMAC+ variant; open its
	 * OF package path before use.
	 */
	if (strcmp(ca->ca_name, "ethernet") == 0) {
		char name[64];

		memset(name, 0, 64);
		OF_package_to_path(ca->ca_node, name, sizeof(name));
		OF_open(name);
		sc->sc_flags |= BMAC_BMACPLUS;
	}

	/* Register offsets in the OF reg property are base-relative. */
	ca->ca_reg[0] += ca->ca_baseaddr;
	ca->ca_reg[2] += ca->ca_baseaddr;
	ca->ca_reg[4] += ca->ca_baseaddr;

	sc->sc_iot = ca->ca_tag;
	if (bus_space_map(sc->sc_iot, ca->ca_reg[0], ca->ca_reg[1], 0,
	    &sc->sc_ioh) != 0) {
		aprint_error(": couldn't map %#x", ca->ca_reg[0]);
		return;
	}

	/* Mask all chip interrupts until the interface is brought up. */
	bmac_write_reg(sc, INTDISABLE, NoEventsMask);

	if (OF_getprop(ca->ca_node, "local-mac-address", laddr, 6) == -1 &&
	    OF_getprop(ca->ca_node, "mac-address", laddr, 6) == -1) {
		printf(": cannot get mac-address\n");
		return;
	}
	memcpy(sc->sc_enaddr, laddr, 6);

	/* Map the TX and RX DBDMA channel registers. */
	sc->sc_txdma = mapiodev(ca->ca_reg[2], PAGE_SIZE);
	sc->sc_rxdma = mapiodev(ca->ca_reg[4], PAGE_SIZE);
	sc->sc_txcmd = dbdma_alloc(BMAC_TXBUFS * sizeof(dbdma_command_t));
	/* One extra RX command slot for the ring-terminating command. */
	sc->sc_rxcmd = dbdma_alloc((BMAC_RXBUFS + 1) * sizeof(dbdma_command_t));
	sc->sc_txbuf = malloc(BMAC_BUFLEN * BMAC_TXBUFS, M_DEVBUF, M_NOWAIT);
	sc->sc_rxbuf = malloc(BMAC_BUFLEN * BMAC_RXBUFS, M_DEVBUF, M_NOWAIT);
	if (sc->sc_txbuf == NULL || sc->sc_rxbuf == NULL ||
	    sc->sc_txcmd == NULL || sc->sc_rxcmd == NULL) {
		printf("cannot allocate memory\n");
		return;
	}

	printf(" irq %d,%d: address %s\n", ca->ca_intr[0], ca->ca_intr[2],
	    ether_sprintf(laddr));

	/* Separate interrupt lines for general chip events and RX. */
	intr_establish(ca->ca_intr[0], IST_EDGE, IPL_NET, bmac_intr, sc);
	intr_establish(ca->ca_intr[2], IST_EDGE, IPL_NET, bmac_rint, sc);

	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_ioctl = bmac_ioctl;
	ifp->if_start = bmac_start;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_watchdog = bmac_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	mii->mii_ifp = ifp;
	mii->mii_readreg = bmac_mii_readreg;
	mii->mii_writereg = bmac_mii_writereg;
	mii->mii_statchg = bmac_mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* Choose a default media. */
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found: fall back to fixed 10baseT. */
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_10_T);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);

	bmac_reset_chip(sc);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 *
 * Reads the station address from the EEPROM (auto-detecting a 93C46
 * vs. 93C56 part), allocates and loads the receive DMA buffer, creates
 * a DMA map per TX descriptor, resets the chip, and attaches the
 * interface with MII media support.  Errors unwind via the fail_*
 * goto chain so no resources leak on a partial attach.
 */
void
rtk_attach(struct rtk_softc *sc)
{
	device_t self = sc->sc_dev;
	struct ifnet *ifp;
	struct rtk_tx_desc *txd;
	uint16_t val;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;
	int i, addr_len;

	callout_init(&sc->rtk_tick_ch, 0);

	/*
	 * Check EEPROM type 9346 or 9356.
	 */
	if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
		addr_len = RTK_EEADDR_LEN1;
	else
		addr_len = RTK_EEADDR_LEN0;

	/*
	 * Get station address.
	 */
	val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	/*
	 * The 8139 receives into one contiguous ring buffer; allocate
	 * it (+16 slop bytes) and wire it up for DMA.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1,
	    &sc->sc_dmanseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't allocate recv buffer, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg,
	    sc->sc_dmanseg, RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "can't map recv buffer, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
	    &sc->recv_dmamap)) != 0) {
		aprint_error_dev(self,
		    "can't create recv buffer DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
	    sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
	    NULL, BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't load recv buffer DMA map, error = %d\n", error);
		goto fail_3;
	}

	/* One single-segment DMA map per hardware TX slot. */
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &txd->txd_dmamap)) != 0) {
			aprint_error_dev(self,
			    "can't create snd buffer DMA map, error = %d\n",
			    error);
			goto fail_4;
		}
		txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
		txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
	}
	SIMPLEQ_INIT(&sc->rtk_tx_free);
	SIMPLEQ_INIT(&sc->rtk_tx_dirty);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	/* Reset the adapter. */
	rtk_reset(sc);

	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));

	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rtk_ioctl;
	ifp->if_start = rtk_start;
	ifp->if_watchdog = rtk_watchdog;
	ifp->if_init = rtk_init;
	ifp->if_stop = rtk_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do ifmedia setup.
	 */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = rtk_phy_readreg;
	sc->mii.mii_writereg = rtk_phy_writereg;
	sc->mii.mii_statchg = rtk_phy_statchg;
	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);

	/* Choose a default media. */
	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		/* No PHY probed: no usable media. */
		ifmedia_add(&sc->mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

	/* Error-path unwind: release everything acquired before failure. */
 fail_4:
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if (txd->txd_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
	}
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf, RTK_RXBUFLEN + 16);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
 fail_0:
	return;
}
/*
 * Probe for an RTL8019/RTL8029 NE2000-compatible chip.
 *
 * Allocates the I/O port range, verifies the RealTek ID registers,
 * runs the generic Novell probe, and sets up the ifmedia list with
 * the default media read back from the chip's CONFIG2/CONFIG3
 * registers.  Returns 0 on success or a standard errno (ENXIO when
 * the chip is not a supported RealTek part).
 */
int
ed_probe_RTL80x9(device_t dev, int port_rid, int flags)
{
	struct ed_softc *sc = device_get_softc(dev);
	char *ts;
	int error;

	if ((error = ed_alloc_port(dev, port_rid, ED_NOVELL_IO_PORTS)))
		return (error);

	sc->asic_offset = ED_NOVELL_ASIC_OFFSET;
	sc->nic_offset = ED_NOVELL_NIC_OFFSET;

	/* Force register page 0 before reading the ID registers. */
	if (ed_nic_inb(sc, ED_P0_CR) & (ED_CR_PS0 | ED_CR_PS1))
		ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STP);
	if (ed_nic_inb(sc, ED_RTL80X9_80X9ID0) != ED_RTL80X9_ID0)
		return (ENXIO);
	switch (ed_nic_inb(sc, ED_RTL80X9_80X9ID1)) {
	case ED_RTL8019_ID1:
		sc->chip_type = ED_CHIP_TYPE_RTL8019;
		ts = "RTL8019";
		break;
	case ED_RTL8029_ID1:
		sc->chip_type = ED_CHIP_TYPE_RTL8029;
		ts = "RTL8029";
		break;
	default:
		return (ENXIO);
	}
	if ((error = ed_probe_Novell_generic(dev, flags)))
		return (error);
	sc->type_str = ts;

	sc->sc_media_ioctl = &ed_rtl80x9_media_ioctl;
	ifmedia_init(&sc->ifmedia, 0, ed_rtl_set_media, ed_rtl_get_media);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, 0);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, 0);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_2, 0, 0);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_5, 0, 0);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, 0);

	/* The configured media lives in CONFIG2 on register page 3. */
	ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_PAGE_3 | ED_CR_STP);
	switch (ed_nic_inb(sc, ED_RTL80X9_CONFIG2) & ED_RTL80X9_CF2_MEDIA) {
	case ED_RTL80X9_CF2_AUTO:
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_AUTO);
		break;
	case ED_RTL80X9_CF2_10_5:
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_10_5);
		break;
	case ED_RTL80X9_CF2_10_2:
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_10_2);
		break;
	case ED_RTL80X9_CF2_10_T:
		/* CONFIG3 carries the full-duplex strap for 10baseT. */
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_10_T |
		    ((ed_nic_inb(sc, ED_RTL80X9_CONFIG3)
		    & ED_RTL80X9_CF3_FUDUP) ? IFM_FDX : 0));
		break;
	}
	return (0);
}
void tsec_attach(struct device *parent, struct device *self, void *aux) { struct tsec_softc *sc = (void *)self; struct obio_attach_args *oa = aux; struct ifnet *ifp; int phy, n; if (OF_getprop(oa->oa_node, "phy-handle", &phy, sizeof(phy)) == sizeof(phy)) { int node, reg; node = tsec_find_phy(OF_peer(0), phy); if (node == -1 || OF_getprop(node, "reg", ®, sizeof(reg)) != sizeof(reg)) { printf(": can't find PHY\n"); return; } oa->oa_phy = reg; } /* Map registers for TSEC1 & TSEC2 if they're not mapped yet. */ if (oa->oa_iot != tsec_iot) { tsec_iot = oa->oa_iot; if (bus_space_map(tsec_iot, oa->oa_offset & 0xffffc000, 8192, 0, &tsec_ioh)) { printf(": can't map registers\n"); return; } } sc->sc_iot = tsec_iot; sc->sc_dmat = oa->oa_dmat; /* Ethernet Controller registers. */ bus_space_subregion(tsec_iot, tsec_ioh, oa->oa_offset & 0x3fff, 3072, &sc->sc_ioh); /* MII Management registers. */ bus_space_subregion(tsec_iot, tsec_ioh, 0, 3072, &sc->sc_mii_ioh); myetheraddr(sc->sc_lladdr); printf(": address %s\n", ether_sprintf(sc->sc_lladdr)); timeout_set(&sc->sc_tick, tsec_tick, sc); ifp = &sc->sc_ac.ac_if; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = tsec_ioctl; ifp->if_start = tsec_start; ifp->if_watchdog = tsec_watchdog; IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_NTXDESC - 1); IFQ_SET_READY(&ifp->if_snd); bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); m_clsetwms(ifp, MCLBYTES, 0, TSEC_NRXDESC); ifp->if_capabilities = IFCAP_VLAN_MTU; sc->sc_mii.mii_ifp = ifp; sc->sc_mii.mii_readreg = tsec_mii_readreg; sc->sc_mii.mii_writereg = tsec_mii_writereg; sc->sc_mii.mii_statchg = tsec_mii_statchg; ifmedia_init(&sc->sc_media, 0, tsec_media_change, tsec_media_status); tsec_reset(sc); /* Reset management. 
*/ tsec_write(sc, TSEC_MIIMCFG, TSEC_MIIMCFG_RESET); tsec_write(sc, TSEC_MIIMCFG, 0x00000003); for (n = 0; n < 100; n++) { if ((tsec_read(sc, TSEC_MIIMIND) & TSEC_MIIMIND_BUSY) == 0) break; } mii_attach(self, &sc->sc_mii, 0xffffffff, oa->oa_phy, MII_OFFSET_ANY, 0); if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); } else ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO); if_attach(ifp); ether_ifattach(ifp); intr_establish(oa->oa_ivec, IST_LEVEL, IPL_NET, tsec_txintr, sc, sc->sc_dev.dv_xname); intr_establish(oa->oa_ivec + 1, IST_LEVEL, IPL_NET, tsec_rxintr, sc, sc->sc_dev.dv_xname); intr_establish(oa->oa_ivec + 2, IST_LEVEL, IPL_NET, tsec_errintr, sc, sc->sc_dev.dv_xname); }