Example #1
int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
	    octeon_eth_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii,
	    0xffffffff, sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
			    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	return 0;
}
Example #2
void
che_attach(struct device *parent, struct device *self, void *aux)
{
	struct cheg_softc *gsc = (struct cheg_softc *)parent;
	struct che_softc *sc = (struct che_softc *)self;
	struct che_attach_args *caa = aux;
	struct ifnet *ifp;

	sc->sc_cheg = gsc;

	sc->sc_port = caa->caa_port;
	bcopy(caa->caa_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	printf(": address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = che_ioctl;
	ifp->if_start = che_start;
	ifp->if_watchdog = che_watchdog;
	ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN; /* XXX */
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 400);
	IFQ_SET_READY(&ifp->if_snd);

	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    che_ifmedia_upd, che_ifmedia_sts);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = che_miibus_ind_readreg;
	sc->sc_mii.mii_writereg = che_miibus_ind_writereg;
	sc->sc_mii.mii_statchg = che_miibus_statchg;

	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE | MIIF_HAVEFIBER);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;
}
Example #3
static void
tlp_cogent_em1x0_tmsw_init(struct tulip_softc *sc)
{
	struct tulip_21x4x_media *tm;
	const char *sep = "";

	sc->sc_gp_dir = GPP_COGENT_EM1x0_PINS;
	sc->sc_opmode = OPMODE_MBO | OPMODE_PS;
	TULIP_WRITE(sc, CSR_OPMODE, sc->sc_opmode);

	ifmedia_init(&sc->sc_mii.mii_media, 0, tlp_mediachange,
	    tlp_mediastatus);
	aprint_normal_dev(sc->sc_dev, "");

#define	ADD(m, c) \
	tm = malloc(sizeof(*tm), M_DEVBUF, M_WAITOK|M_ZERO);		\
	tm->tm_opmode = (c);						\
	tm->tm_gpdata = GPP_COGENT_EM1x0_INIT;				\
	ifmedia_add(&sc->sc_mii.mii_media, (m), 0, tm)
#define	PRINT(str)	aprint_normal("%s%s", sep, str); sep = ", "

	if (sc->sc_srom[32] == 0x15) {
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_FX, 0, 0),
		    OPMODE_PS | OPMODE_PCS);
		PRINT("100baseFX");

		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_FX, IFM_FDX, 0),
		    OPMODE_PS | OPMODE_PCS | OPMODE_FD);
		PRINT("100baseFX-FDX");
		aprint_normal("\n");

		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_100_FX);
	} else {
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, 0),
		    OPMODE_PS | OPMODE_PCS | OPMODE_SCR);
		PRINT("100baseTX");

		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_FX, IFM_FDX, 0),
		    OPMODE_PS | OPMODE_PCS | OPMODE_SCR | OPMODE_FD);
		PRINT("100baseTX-FDX");
		aprint_normal("\n");

		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_100_TX);
	}

#undef ADD
#undef PRINT
}
Example #4
File: dp8390.c Project: MarginC/kame
/*
 * Standard media init routine for the dp8390.
 */
void
dp8390_media_init(struct dp8390_softc *sc)
{
	ifmedia_init(&sc->sc_media, 0, dp8390_mediachange, dp8390_mediastatus);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
}
Example #5
static int
nicvf_setup_ifmedia(struct nicvf *nic)
{

	ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change,
	    nicvf_media_status);

	/*
	 * Advertise availability of all possible connection types,
	 * even though not all are possible at the same time.
	 */

	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX),
	    0, NULL);

	ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX));

	return (0);
}
Example #6
/*
 * Activate a vap.  State should have been prepared with a
 * call to ieee80211_vap_setup and by the driver.  On return
 * from this call the vap is ready for use.
 */
int
ieee80211_vap_attach(struct ieee80211vap *vap,
	ifm_change_cb_t media_change, ifm_stat_cb_t media_stat)
{
	struct ifnet *ifp = vap->iv_ifp;
	struct ieee80211com *ic = vap->iv_ic;
	struct ifmediareq imr;
	int maxrate;

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
	    "%s: %s parent %s flags 0x%x flags_ext 0x%x\n",
	    __func__, ieee80211_opmode_name[vap->iv_opmode],
	    ic->ic_ifp->if_xname, vap->iv_flags, vap->iv_flags_ext);

	/*
	 * Do late attach work that cannot happen until after
	 * the driver has had a chance to override defaults.
	 */
	ieee80211_node_latevattach(vap);
	ieee80211_power_latevattach(vap);

	maxrate = ieee80211_media_setup(ic, &vap->iv_media, vap->iv_caps,
	    vap->iv_opmode == IEEE80211_M_STA, media_change, media_stat);
	ieee80211_media_status(ifp, &imr);
	/* NB: strip explicit mode; we're actually in autoselect */
	ifmedia_set(&vap->iv_media,
	    imr.ifm_active &~ (IFM_MMASK | IFM_IEEE80211_TURBO));
	if (maxrate)
		ifp->if_baudrate = IF_Mbps(maxrate);

	ether_ifattach(ifp, vap->iv_myaddr, &wlan_global_serializer);
	if (vap->iv_opmode == IEEE80211_M_MONITOR) {
		/* NB: disallow transmit */
#ifdef __FreeBSD__
		ifp->if_transmit = null_transmit;
#endif
		ifp->if_output = null_output;
	} else {
		/* hook output method setup by ether_ifattach */
		vap->iv_output = ifp->if_output;
		ifp->if_output = ieee80211_output;
	}
	/* NB: if_mtu set by ether_ifattach to ETHERMTU */

	TAILQ_INSERT_TAIL(&ic->ic_vaps, vap, iv_next);
	ieee80211_syncflag_locked(ic, IEEE80211_F_WME);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP);
#endif
	ieee80211_syncflag_locked(ic, IEEE80211_F_PCF);
	ieee80211_syncflag_locked(ic, IEEE80211_F_BURST);
	ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT);
	ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40);
	ieee80211_syncifflag_locked(ic, IFF_PROMISC);
	ieee80211_syncifflag_locked(ic, IFF_ALLMULTI);

	return 1;
}
Example #7
void
rtl80x9_media_init(struct dp8390_softc *sc)
{
	static int rtl80x9_media[] = {
		IFM_ETHER|IFM_AUTO,
		IFM_ETHER|IFM_10_T,
		IFM_ETHER|IFM_10_T|IFM_FDX,
		IFM_ETHER|IFM_10_2,
	};
	static const int rtl80x9_nmedia =
	    sizeof(rtl80x9_media) / sizeof(rtl80x9_media[0]);

	int i, defmedia;
	u_int8_t conf2, conf3;

	aprint_normal_dev(sc->sc_dev,
	    "10base2, 10baseT, 10baseT-FDX, auto, default ");

	bus_space_write_1(sc->sc_regt, sc->sc_regh, ED_P0_CR, ED_CR_PAGE_3);

	conf2 = bus_space_read_1(sc->sc_regt, sc->sc_regh, NERTL_RTL3_CONFIG2);
	conf3 = bus_space_read_1(sc->sc_regt, sc->sc_regh, NERTL_RTL3_CONFIG3);
	printf("[0x%02x 0x%02x] ", conf2, conf3);

	conf2 &= RTL3_CONFIG2_PL1|RTL3_CONFIG2_PL0;

	switch (conf2) {
	default:
		defmedia = IFM_ETHER|IFM_AUTO;
		printf("auto\n");
		break;

	case RTL3_CONFIG2_PL1|RTL3_CONFIG2_PL0:
	case RTL3_CONFIG2_PL1:	/* XXX RTL docs say 10base5, but the chip can't do it */
		defmedia = IFM_ETHER|IFM_10_2;
		printf("10base2\n");
		break;

	case RTL3_CONFIG2_PL0:
		if (conf3 & RTL3_CONFIG3_FUDUP) {
			defmedia = IFM_ETHER|IFM_10_T|IFM_FDX;
			printf("10baseT-FDX\n");
		} else {
			defmedia = IFM_ETHER|IFM_10_T;
			printf("10baseT\n");
		}
		break;
	}

	bus_space_write_1(sc->sc_regt, sc->sc_regh, ED_P0_CR, ED_CR_PAGE_0);

	ifmedia_init(&sc->sc_media, 0, dp8390_mediachange, dp8390_mediastatus);
	for (i = 0; i < rtl80x9_nmedia; i++)
		ifmedia_add(&sc->sc_media, rtl80x9_media[i], 0, NULL);
	ifmedia_set(&sc->sc_media, defmedia);
}
Example #8
void
mb8795_config(struct mb8795_softc *sc, int *media, int nmedia, int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: mb8795_config()\n",device_xname(sc->sc_dev)));

	/* Initialize ifnet structure. */
	memcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = mb8795_start;
	ifp->if_ioctl = mb8795_ioctl;
	ifp->if_watchdog = mb8795_watchdog;
	ifp->if_flags =
		IFF_BROADCAST | IFF_NOTRAILERS;

	/* Initialize media goo. */
	ifmedia_init(&sc->sc_media, 0, mb8795_mediachange,
		     mb8795_mediastatus);
	if (media != NULL) {
		int i;
		for (i = 0; i < nmedia; i++)
			ifmedia_add(&sc->sc_media, media[i], 0, NULL);
		ifmedia_set(&sc->sc_media, defmedia);
	} else {
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	sc->sc_sh = shutdownhook_establish(mb8795_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("mb8795_config: can't establish shutdownhook");

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	DPRINTF(("%s: leaving mb8795_config()\n",device_xname(sc->sc_dev)));
}
Example #9
/*
 * EtherN media.
 */
void
en_init_media(struct dp8390_softc *sc)
{
	static int en_media[] = {
		IFM_ETHER|IFM_10_T
	};
	aprint_normal_dev(sc->sc_dev, "10baseT, default 10baseT\n");

	ifmedia_init(&sc->sc_media, 0, dp8390_mediachange, dp8390_mediastatus);
	ifmedia_add(&sc->sc_media, en_media[0], 0, NULL);
	ifmedia_set(&sc->sc_media, en_media[0]);
}
Example #10
void
nep_attach(struct device *parent, struct device *self, void *aux)
{
	struct nep_softc *sc = (struct nep_softc *)self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	pcireg_t memtype;
	uint64_t cfg;

	sc->sc_dmat = pa->pa_dmat;

	memtype = PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT;
	if (pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": can't map registers\n");
		return;
	}

	sc->sc_port = pa->pa_function;

#ifdef __sparc64__
	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
	    sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) <= 0)
		myetheraddr(sc->sc_ac.ac_enaddr);
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	cfg = nep_read(sc, MIF_CONFIG);
	cfg &= ~MIF_CONFIG_INDIRECT_MODE;
	nep_write(sc, MIF_CONFIG, cfg);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_ioctl = nep_ioctl;

	mii->mii_ifp = ifp;
	mii->mii_readreg = nep_mii_readreg;
	mii->mii_writereg = nep_mii_writereg;
	mii->mii_statchg = nep_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, nep_mediachange, nep_mediastatus);

	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, sc->sc_port, 0);
	ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nep_tick, sc);
}
Example #11
void
ax88190_media_init(struct dp8390_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ax88190_mii_readreg;
	sc->sc_mii.mii_writereg = ax88190_mii_writereg;
	sc->sc_mii.mii_statchg = ax88190_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, dp8390_mediachange,
	    dp8390_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
Example #12
//
// Called from ti814x_attach() in init.c to hook up to the BSD media
// structure.  Not entirely unlike kissing a porcupine, we must do so
// carefully: we do not want to use the BSD MII management structure,
// because this driver handles link changes itself via the link interrupt.
//
void
bsd_mii_initmedia(ti814x_dev_t *ti814x)
{
    ti814x->bsd_mii.mii_ifp = &ti814x->ecom.ec_if;

    ifmedia_init(&ti814x->bsd_mii.mii_media, IFM_IMASK, bsd_mii_mediachange,
      bsd_mii_mediastatus);

    // we do NOT call mii_attach() - we do our own link management

    //
    // must create these entries to make ifconfig media work
    // see lib/socket/public/net/if_media.h for defines
    //

    // ifconfig wm0 none (x22)
    ifmedia_add(&ti814x->bsd_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);

    // ifconfig wm0 auto (x20)
    ifmedia_add(&ti814x->bsd_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);

    // ifconfig wm0 10baseT (x23 - half duplex)
    ifmedia_add(&ti814x->bsd_mii.mii_media, IFM_ETHER|IFM_10_T, 0, NULL);

    // ifconfig wm0 10baseT-FDX (x100023)
    ifmedia_add(&ti814x->bsd_mii.mii_media, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);

    // ifconfig wm0 100baseTX (x26 - half duplex)
    ifmedia_add(&ti814x->bsd_mii.mii_media, IFM_ETHER|IFM_100_TX, 0, NULL);

    // ifconfig wm0 100baseTX-FDX (x100026 - full duplex)
    ifmedia_add(&ti814x->bsd_mii.mii_media, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);

    // ifconfig wm0 1000baseT (x30 - half duplex)
    ifmedia_add(&ti814x->bsd_mii.mii_media, IFM_ETHER|IFM_1000_T, 0, NULL);

    // ifconfig wm0 1000baseT mediaopt fdx (x100030 - full duplex)
    ifmedia_add(&ti814x->bsd_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);

    // add more entries to support flow control via ifconfig media

    // link is initially down
    ifmedia_set(&ti814x->bsd_mii.mii_media, IFM_ETHER|IFM_NONE);
}
Example #13
int
pdq_ifattach(pdq_softc_t *sc, const pdq_uint8_t *llc, pdq_type_t type)
{
    struct ifnet *ifp;

    ifp = PDQ_IFNET(sc) = if_alloc(IFT_FDDI);
    if (ifp == NULL) {
	device_printf(sc->dev, "can not if_alloc()\n");
	return (ENOSPC);
    }

    mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
	MTX_DEF);
    callout_init_mtx(&sc->watchdog, &sc->mtx, 0);

    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
    ifp->if_softc = sc;
    ifp->if_init = pdq_ifinit;
    ifp->if_snd.ifq_maxlen = ifqmaxlen;
    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;

    ifp->if_ioctl = pdq_ifioctl;
    ifp->if_start = pdq_ifstart;

#if defined(IFM_FDDI)
    {
	const int media = sc->sc_ifmedia.ifm_media;
	ifmedia_init(&sc->sc_ifmedia, IFM_FDX,
		     pdq_ifmedia_change, pdq_ifmedia_status);
	ifmedia_add(&sc->sc_ifmedia, media, 0, 0);
	ifmedia_set(&sc->sc_ifmedia, media);
    }
#endif
  
    sc->sc_pdq = pdq_initialize(sc->mem_bst, sc->mem_bsh, ifp->if_xname, -1,
	sc, type);
    if (sc->sc_pdq == NULL) {
	device_printf(sc->dev, "Initialization failed.\n");
	return (ENXIO);
    }

    fddi_ifattach(ifp, llc, FDDI_BPF_SUPPORTED);
    return (0);
}
Example #14
/*
 * EtherLan 600 media.
 */
void eh600_init_media(struct dp8390_softc *sc)
{
	static int eh600_media[] = {
		IFM_ETHER|IFM_AUTO,
		IFM_ETHER|IFM_10_T,
		IFM_ETHER|IFM_10_2,
	};
	int i, defmedia = IFM_ETHER|IFM_AUTO;
	static const int eh600_nmedia =
	    sizeof(eh600_media) / sizeof(eh600_media[0]);

	aprint_normal_dev(sc->sc_dev,
	    "10base2, 10baseT, auto, default auto\n");

	ifmedia_init(&sc->sc_media, 0, dp8390_mediachange, dp8390_mediastatus);
	for (i = 0; i < eh600_nmedia; i++)
		ifmedia_add(&sc->sc_media, eh600_media[i], 0, NULL);
	ifmedia_set(&sc->sc_media, defmedia);

}
Example #15
static void
miibus_mediainit(device_t dev)
{
	struct mii_data		*mii;
	struct ifmedia_entry	*m;
	int			media = 0;

	/* Poke the parent in case it has any media of its own to add. */
	MIIBUS_MEDIAINIT(device_get_parent(dev));

	mii = device_get_softc(dev);
	for (m = LIST_FIRST(&mii->mii_media.ifm_list); m != NULL;
	     m = LIST_NEXT(m, ifm_list)) {
		media = m->ifm_media;
		if (media == (IFM_ETHER|IFM_AUTO))
			break;
	}

	ifmedia_set(&mii->mii_media, media);

	return;
}
Example #16
int
pair_clone_create(struct if_clone *ifc, int unit)
{
	struct ifnet		*ifp;
	struct pair_softc	*sc;

	if ((sc = malloc(sizeof(*sc),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	ifp = &sc->sc_ac.ac_if;
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pair%d", unit);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ether_fakeaddr(ifp);

	ifp->if_softc = sc;
	ifp->if_ioctl = pairioctl;
	ifp->if_start = pairstart;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_hardmtu = 0xffff;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	ifmedia_init(&sc->sc_media, 0, pair_media_change,
	    pair_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	pair_link_state(ifp);

	return (0);
}
Example #17
static int
gx_attach(device_t dev)
{
	struct ifnet *ifp;
	struct gx_softc *sc;
	uint8_t mac[6];
	int error;
	int rid;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_port = device_get_unit(dev);

	/* Read MAC address.  */
	GXEMUL_ETHER_DEV_WRITE(GXEMUL_ETHER_DEV_MAC, (uintptr_t)mac);

	/* Allocate and establish interrupt.  */
	rid = 0;
	sc->sc_intr = bus_alloc_resource(sc->sc_dev, SYS_RES_IRQ, &rid,
	    GXEMUL_ETHER_DEV_IRQ - 2, GXEMUL_ETHER_DEV_IRQ - 2, 1, RF_ACTIVE);
	if (sc->sc_intr == NULL) {
		device_printf(dev, "unable to allocate IRQ.\n");
		return (ENXIO);
	}

	error = bus_setup_intr(sc->sc_dev, sc->sc_intr, INTR_TYPE_NET, NULL,
	    gx_rx_intr, sc, &sc->sc_intr_cookie);
	if (error != 0) {
		device_printf(dev, "unable to setup interrupt.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENXIO);
	}

	bus_describe_intr(sc->sc_dev, sc->sc_intr, sc->sc_intr_cookie, "rx");

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = gx_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
	ifp->if_ioctl = gx_ioctl;

	sc->sc_ifp = ifp;
	sc->sc_flags = ifp->if_flags;

	ifmedia_init(&sc->sc_ifmedia, 0, gx_medchange, gx_medstat);

	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	mtx_init(&sc->sc_mtx, "GXemul Ethernet", NULL, MTX_DEF);

	ether_ifattach(ifp, mac);

	ifp->if_transmit = gx_transmit;

	return (bus_generic_attach(dev));
}
Example #18
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}
Example #19
/*
 * cas_config:
 *
 *	Attach a Cassini interface to the system.
 */
void
cas_config(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	cas_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct cas_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct cas_control_data), 1,
	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	bzero(sc->sc_control_data, sizeof(struct cas_control_data));

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < CAS_NRXDESC; i++) {
		bus_dma_segment_t seg;
		caddr_t kva;
		int rseg;

		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
			printf("\n%s: unable to alloc rx DMA mem %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_dmaseg = seg;

		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
		    CAS_PAGE_SIZE, &kva, BUS_DMA_NOWAIT)) != 0) {
			printf("\n%s: unable to alloc rx DMA mem %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_kva = kva;

		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}

		if ((error = bus_dmamap_load(sc->sc_dmatag,
		   sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
		   BUS_DMA_NOWAIT)) != 0) {
			printf("\n%s: unable to load rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 16 * 1024;

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = cas_start;
	ifp->if_ioctl = cas_ioctl;
	ifp->if_watchdog = cas_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, CAS_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = cas_mii_readreg;
	mii->mii_writereg = cas_mii_writereg;
	mii->mii_statchg = cas_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);

	cas_mifinit(sc);

	if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
	            CAS_MIF_CONFIG, sc->sc_mif_config);
	}

	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL &&
	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0|CAS_MIF_CONFIG_MDI1)) {
		/* 
		 * Try the external PCS SERDES if we didn't find any
		 * MII devices.
		 */
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		     CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);

		mii->mii_readreg = cas_pcs_readreg;
		mii->mii_writereg = cas_pcs_writereg;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				       " at phy %d, instance %d\n",
				       sc->sc_dev.dv_xname,
				       child->mii_dev.dv_xname,
				       child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(cas_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("cas_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, cas_tick, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < CAS_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < CAS_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct cas_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}
Example #20
File: if_ex.c Project: AhmadTux/freebsd
int
ex_attach(device_t dev)
{
	struct ex_softc *	sc = device_get_softc(dev);
	struct ifnet *		ifp;
	struct ifmedia *	ifm;
	int			error;
	uint16_t		temp;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		return (ENOSPC);
	}
	/* work out which set of irq <-> internal tables to use */
	if (ex_card_type(sc->enaddr) == CARD_TYPE_EX_10_PLUS) {
		sc->irq2ee = plus_irq2eemap;
		sc->ee2irq = plus_ee2irqmap;
	} else {
		sc->irq2ee = irq2eemap;
		sc->ee2irq = ee2irqmap;
	}

	sc->mem_size = CARD_RAM_SIZE;	/* XXX This should be read from the card itself. */

	/*
	 * Initialize the ifnet structure.
	 */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_init = ex_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);

	ifmedia_init(&sc->ifmedia, 0, ex_ifmedia_upd, ex_ifmedia_sts);
	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->timer, &sc->lock, 0);

	temp = ex_eeprom_read(sc, EE_W5);
	if (temp & EE_W5_PORT_TPE)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	if (temp & EE_W5_PORT_BNC)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
	if (temp & EE_W5_PORT_AUI)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);

	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
	ifmedia_set(&sc->ifmedia, ex_get_media(sc));

	ifm = &sc->ifmedia;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;	
	ex_ifmedia_upd(ifp);

	/*
	 * Attach the interface.
	 */
	ether_ifattach(ifp, sc->enaddr);

	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
				NULL, ex_intr, (void *)sc, &sc->ih);
	if (error) {
		device_printf(dev, "bus_setup_intr() failed!\n");
		ether_ifdetach(ifp);
		mtx_destroy(&sc->lock);
		return (error);
	}

	return(0);
}
Example #21
static int
admsw_attach(device_t dev)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = (struct admsw_softc *) device_get_softc(dev);
	struct ifnet *ifp;
	int error, i, rid;

	sc->sc_dev = dev;
	device_printf(dev, "ADM5120 Switch Engine, %d ports\n", SW_DEVS);
	sc->ndevs = 0;

	/* XXXMIPS: fix it */
	enaddr[0] = 0x00;
	enaddr[1] = 0x0C;
	enaddr[2] = 0x42;
	enaddr[3] = 0x07;
	enaddr[4] = 0xB2;
	enaddr[5] = 0x4E;

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	device_printf(sc->sc_dev, "base Ethernet address %s\n",
	    ether_sprintf(enaddr));
	callout_init(&sc->sc_watchdog, 1);

	rid = 0;
	if ((sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 
	    RF_ACTIVE)) == NULL) {
		device_printf(dev, "unable to allocate memory resource\n");
		return (ENXIO);
	}

	/* Hook up the interrupt handler. */
	rid = 0;
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 
	    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "unable to allocate IRQ resource\n");
		return (ENXIO);
	}

	if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET, 
	    admsw_intr, NULL, sc, &sc->sc_ih)) != 0) {
		device_printf(dev,
		    "WARNING: unable to register interrupt handler\n");
		return (error);
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dma_tag_create(NULL, 4, 0, 
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, NULL, NULL, 
	    &sc->sc_control_dmat)) != 0) {
		device_printf(sc->sc_dev, 
		    "unable to create control data DMA map, error = %d\n", 
		    error);
		return (error);
	}

	if ((error = bus_dmamem_alloc(sc->sc_control_dmat,
	    (void **)&sc->sc_control_data, BUS_DMA_NOWAIT, 
	    &sc->sc_cddmamap)) != 0) {
		device_printf(sc->sc_dev, 
		    "unable to allocate control data, error = %d\n", error);
		return (error);
	}

	if ((error = bus_dmamap_load(sc->sc_control_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data), 
	    admsw_dma_map_addr, &sc->sc_cddma, 0)) != 0) {
		device_printf(sc->sc_dev, 
		    "unable to load control data DMA map, error = %d\n", error);
		return (error);
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	if ((error = bus_dma_tag_create(NULL, 1, 0, 
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, 
	    &sc->sc_bufs_dmat)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to create buffer DMA tag, error = %d\n",
		    error);
		return (error);
	}

	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev, 
			    "unable to create txh DMA map %d, error = %d\n", 
			    i, error);
			return (error);
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}

	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev, 
			    "unable to create txl DMA map %d, error = %d\n", 
			    i, error);
			return (error);
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0, 
		     &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev, 
			    "unable to create rxh DMA map %d, error = %d\n", 
			    i, error);
			return (error);
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}

	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev, 
			    "unable to create rxl DMA map %d, error = %d\n",
			    i, error);
			return (error);
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);
	admsw_reset(sc);

	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, 
		    admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], 
		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], 
		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER);

		/* Setup interface parameters */
		ifp->if_softc = sc;
		if_initname(ifp, device_get_name(dev), i);
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_output = ether_output;
		ifp->if_start = admsw_start;
		ifp->if_init = admsw_init;
		ifp->if_mtu = ETHERMTU;
		ifp->if_baudrate = IF_Mbps(100);
		IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, ifqmaxlen));
		ifp->if_snd.ifq_drv_maxlen = max(ADMSW_NTXLDESC, ifqmaxlen);
		IFQ_SET_READY(&ifp->if_snd);
		ifp->if_capabilities |= IFCAP_VLAN_MTU;

		/* Attach the interface. */
		ether_ifattach(ifp, enaddr);
		enaddr[5]++;
	}

	/* XXX: admwdog_attach(sc); */

	/* leave interrupts and cpu port disabled */
	return (0);
}
Example #22
static int
octm_attach(device_t dev)
{
	struct ifnet *ifp;
	struct octm_softc *sc;
	cvmx_mixx_irhwm_t mixx_irhwm;
	cvmx_mixx_intena_t mixx_intena;
	uint64_t mac;
	int error;
	int irq;
	int rid;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_port = device_get_unit(dev);

	switch (sc->sc_port) {
	case 0:
		irq = OCTEON_IRQ_MII;
		break;
	case 1:
		irq = OCTEON_IRQ_MII1;
		break;
	default:
		device_printf(dev, "unsupported management port %u.\n", sc->sc_port);
		return (ENXIO);
	}

	/*
	 * Set MAC address for this management port.
	 */
	mac = 0;
	memcpy((u_int8_t *)&mac + 2, cvmx_sysinfo_get()->mac_addr_base, 6);
	mac += sc->sc_port;

	cvmx_mgmt_port_set_mac(sc->sc_port, mac);

	/* No watermark for input ring.  */
	mixx_irhwm.u64 = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(sc->sc_port), mixx_irhwm.u64);

	/* Enable input ring interrupts.  */
	mixx_intena.u64 = 0;
	mixx_intena.s.ithena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(sc->sc_port), mixx_intena.u64);

	/* Allocate and establish interrupt.  */
	rid = 0;
	sc->sc_intr = bus_alloc_resource(sc->sc_dev, SYS_RES_IRQ, &rid,
	    irq, irq, 1, RF_ACTIVE);
	if (sc->sc_intr == NULL) {
		device_printf(dev, "unable to allocate IRQ.\n");
		return (ENXIO);
	}

	error = bus_setup_intr(sc->sc_dev, sc->sc_intr, INTR_TYPE_NET, NULL,
	    octm_rx_intr, sc, &sc->sc_intr_cookie);
	if (error != 0) {
		device_printf(dev, "unable to setup interrupt.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENXIO);
	}

	bus_describe_intr(sc->sc_dev, sc->sc_intr, sc->sc_intr_cookie, "rx");

	/* XXX Possibly should enable TX interrupts.  */

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = octm_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
	ifp->if_ioctl = octm_ioctl;

	sc->sc_ifp = ifp;
	sc->sc_flags = ifp->if_flags;

	ifmedia_init(&sc->sc_ifmedia, 0, octm_medchange, octm_medstat);

	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, (const u_int8_t *)&mac + 2);

	ifp->if_transmit = octm_transmit;

	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	IFQ_SET_MAXLEN(&ifp->if_snd, CVMX_MGMT_PORT_NUM_TX_BUFFERS);
	ifp->if_snd.ifq_drv_maxlen = CVMX_MGMT_PORT_NUM_TX_BUFFERS;
	IFQ_SET_READY(&ifp->if_snd);

	return (bus_generic_attach(dev));
}
Example #23
int
ed_probe_RTL80x9(device_t dev, int port_rid, int flags)
{
	struct ed_softc *sc = device_get_softc(dev);
	char *ts;
	int error;

	if ((error = ed_alloc_port(dev, port_rid, ED_NOVELL_IO_PORTS)))
		return (error);
	
	sc->asic_offset = ED_NOVELL_ASIC_OFFSET;
	sc->nic_offset  = ED_NOVELL_NIC_OFFSET;

	if (ed_nic_inb(sc, ED_P0_CR) & (ED_CR_PS0 | ED_CR_PS1))
		ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STP);

	if (ed_nic_inb(sc, ED_RTL80X9_80X9ID0) != ED_RTL80X9_ID0)
		return (ENXIO);

	switch (ed_nic_inb(sc, ED_RTL80X9_80X9ID1)) {
	case ED_RTL8019_ID1:
		sc->chip_type = ED_CHIP_TYPE_RTL8019;
		ts = "RTL8019";
		break;
	case ED_RTL8029_ID1:
		sc->chip_type = ED_CHIP_TYPE_RTL8029;
		ts = "RTL8029";
		break;
	default:
		return (ENXIO);
	}

	if ((error = ed_probe_Novell_generic(dev, flags)))
		return (error);

	sc->type_str = ts;
	sc->sc_media_ioctl = &ed_rtl80x9_media_ioctl;
	ifmedia_init(&sc->ifmedia, 0, ed_rtl_set_media, ed_rtl_get_media);

	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, 0);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, 0);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_2, 0, 0);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_5, 0, 0);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, 0);

	ed_nic_barrier(sc, ED_P0_CR, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_PAGE_3 | ED_CR_STP);
	ed_nic_barrier(sc, ED_P0_CR, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	switch (ed_nic_inb(sc, ED_RTL80X9_CONFIG2) & ED_RTL80X9_CF2_MEDIA) {
	case ED_RTL80X9_CF2_AUTO:
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_AUTO);
		break;
	case ED_RTL80X9_CF2_10_5:
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_10_5);
		break;
	case ED_RTL80X9_CF2_10_2:
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_10_2);
		break;
	case ED_RTL80X9_CF2_10_T:
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_10_T |
		    ((ed_nic_inb(sc, ED_RTL80X9_CONFIG3)
		    & ED_RTL80X9_CF3_FUDUP) ? IFM_FDX : 0));
		break;
	}
	return (0);
}
Example #24
/* Attach the interface. Allocate softc structures */
static int
sln_attach(device_t dev)
{
	struct sln_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	unsigned char eaddr[ETHER_ADDR_LEN];
	int rid;
	int error = 0;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* TODO: power state change */

	pci_enable_busmaster(dev);

	rid = SL_RID;
	sc->sln_res = bus_alloc_resource_any(dev, SL_RES, &rid, RF_ACTIVE);
	if (sc->sln_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->sln_bustag = rman_get_bustag(sc->sln_res);
	sc->sln_bushandle = rman_get_bushandle(sc->sln_res);

	/* alloc pci irq */
	rid = 0;
	sc->sln_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sln_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		bus_release_resource(dev, SL_RES, SL_RID, sc->sln_res);
		error = ENXIO;
		goto fail;
	}

	/* Get MAC address */
	((uint32_t *)(&eaddr))[0] = be32toh(SLN_READ_4(sc, SL_MAC_ADDR0));
	((uint16_t *)(&eaddr))[2] = be16toh(SLN_READ_4(sc, SL_MAC_ADDR1));

	/* alloc rx buffer space */
	sc->sln_bufdata.sln_rx_buf = contigmalloc(SL_RX_BUFLEN,
	    M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
	if (sc->sln_bufdata.sln_rx_buf == NULL) {
		device_printf(dev, "no memory for rx buffers!\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sln_irq);
		bus_release_resource(dev, SL_RES, SL_RID, sc->sln_res);
		error = ENXIO;
		goto fail;
	}
	callout_init(&sc->sln_state);

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = sln_init;
	ifp->if_start = sln_tx;
	ifp->if_ioctl = sln_ioctl;
	ifp->if_watchdog = sln_watchdog;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	/* initial media */
	ifmedia_init(&sc->ifmedia, 0, sln_media_upd, sln_media_stat);

	/* supported media types */
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);

	/* Choose a default media. */
	ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sln_irq, INTR_MPSAFE, sln_interrupt, sc,
			       &sc->sln_intrhand, ifp->if_serializer);
	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sln_irq);
		bus_release_resource(dev, SL_RES, SL_RID, sc->sln_res);
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->sln_irq);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	return error;
}
Example #25
/* Set media speed and duplex mode */
static void
sln_media_cfg(struct sln_softc *sc)
{
	u_long phys[2];
	uint32_t mediatype;
	uint32_t temp;

	mediatype = (&sc->ifmedia)->ifm_cur->ifm_media;

	temp = SLN_READ_4(sc, SL_PHY_CTRL);
	temp &= ~(SL_PHYCTL_DUX | SL_PHYCTL_SPD100 | SL_PHYCTL_SPD10);
	temp |= (SL_PHYCTL_ANE | SL_PHYCTL_RESET);

	/************************************************/
	/* currently set media word by selected media   */
	/*                                              */
	/* IFM_ETHER = 0x00000020                       */
	/* IFM_AUTO=0, IFM_10_T=3,  IFM_100_TX=6        */
	/* IFM_FDX=0x00100000    IFM_HDX=0x00200000     */
	/************************************************/
	switch (mediatype) {
	case 0x00000020:
		PDEBUG("autoselect supported\n");
		temp |= (SL_PHYCTL_DUX | SL_PHYCTL_SPD100 | SL_PHYCTL_SPD10);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_AUTO);
		break;
	case 0x23:
	case 0x00200023:
		PDEBUG(" 10Mbps half_duplex supported\n");
		temp |= SL_PHYCTL_SPD10;
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX);
		break;

	case 0x00100023:
		PDEBUG("10Mbps full_duplex supported\n");
		temp |= (SL_PHYCTL_SPD10 | SL_PHYCTL_DUX);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX);
		break;

	case 0x26:
	case 0x00200026:
		PDEBUG("100Mbps half_duplex supported\n");
		temp |= SL_PHYCTL_SPD100;
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX);
		break;

	case 0x00100026:
		PDEBUG("100Mbps full_duplex supported\n");
		temp |= (SL_PHYCTL_SPD100 | SL_PHYCTL_DUX);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
		ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
		break;

	default:
		break;
	}

	SLN_WRITE_4(sc, SL_PHY_CTRL, temp);

	DELAY(10000);
	temp &= ~SL_PHYCTL_RESET;
	SLN_WRITE_4(sc, SL_PHY_CTRL, temp);

	DELAY(1000);
	phys[0] = SL_MII_JAB;
	phys[1] = SL_PHY_16_JAB_ENB | SL_PHY_16_PORT_ENB;
	sln_mii_cmd(sc, SL_MII0_WRITE, phys);

	sc->connect = 0;
	sln_mii_cmd(sc, SL_MII0_SCAN, phys);
}
Example #26
/*
 * Setup the media data structures according to the channel and
 * rate tables.  This must be called by the driver after
 * ieee80211_attach and before most anything else.
 */
void
ieee80211_media_init(struct ieee80211com *ic,
	ifm_change_cb_t media_change, ifm_stat_cb_t media_stat)
{
#define	ADD(_ic, _s, _o) \
	ifmedia_add(&(_ic)->ic_media, \
		IFM_MAKEWORD(IFM_IEEE80211, (_s), (_o), 0), 0, NULL)
	struct ifnet *ifp = ic->ic_ifp;
	struct ifmediareq imr;
	int i, j, mode, rate, maxrate, mword, mopt, r;
	const struct ieee80211_rateset *rs;
	struct ieee80211_rateset allrates;

	/*
	 * Do late attach work that must wait for any subclass
	 * (i.e. driver) work such as overriding methods.
	 */
	ieee80211_node_lateattach(ic);

#ifdef IEEE80211_NO_HOSTAP
	ic->ic_caps &= ~IEEE80211_C_HOSTAP;
#endif /* IEEE80211_NO_HOSTAP */

	/*
	 * Fill in media characteristics.
	 */
	ifmedia_init(&ic->ic_media, 0, media_change, media_stat);
	maxrate = 0;
	memset(&allrates, 0, sizeof(allrates));
	for (mode = IEEE80211_MODE_AUTO; mode < IEEE80211_MODE_MAX; mode++) {
		static const u_int mopts[] = { 
			IFM_AUTO,
			IFM_IEEE80211_11A,
			IFM_IEEE80211_11B,
			IFM_IEEE80211_11G,
			IFM_IEEE80211_FH,
			IFM_IEEE80211_11A | IFM_IEEE80211_TURBO,
			IFM_IEEE80211_11G | IFM_IEEE80211_TURBO,
		};
		if ((ic->ic_modecaps & (1<<mode)) == 0)
			continue;
		mopt = mopts[mode];
		ADD(ic, IFM_AUTO, mopt);	/* e.g. 11a auto */
		if (ic->ic_caps & IEEE80211_C_IBSS)
			ADD(ic, IFM_AUTO, mopt | IFM_IEEE80211_ADHOC);
		if (ic->ic_caps & IEEE80211_C_HOSTAP)
			ADD(ic, IFM_AUTO, mopt | IFM_IEEE80211_HOSTAP);
		if (ic->ic_caps & IEEE80211_C_AHDEMO)
			ADD(ic, IFM_AUTO, mopt | IFM_IEEE80211_ADHOC | IFM_FLAG0);
		if (ic->ic_caps & IEEE80211_C_MONITOR)
			ADD(ic, IFM_AUTO, mopt | IFM_IEEE80211_MONITOR);
		if (mode == IEEE80211_MODE_AUTO)
			continue;
		rs = &ic->ic_sup_rates[mode];
		for (i = 0; i < rs->rs_nrates; i++) {
			rate = rs->rs_rates[i];
			mword = ieee80211_rate2media(ic, rate, mode);
			if (mword == 0)
				continue;
			ADD(ic, mword, mopt);
			if (ic->ic_caps & IEEE80211_C_IBSS)
				ADD(ic, mword, mopt | IFM_IEEE80211_ADHOC);
			if (ic->ic_caps & IEEE80211_C_HOSTAP)
				ADD(ic, mword, mopt | IFM_IEEE80211_HOSTAP);
			if (ic->ic_caps & IEEE80211_C_AHDEMO)
				ADD(ic, mword, mopt | IFM_IEEE80211_ADHOC | IFM_FLAG0);
			if (ic->ic_caps & IEEE80211_C_MONITOR)
				ADD(ic, mword, mopt | IFM_IEEE80211_MONITOR);
			/*
			 * Add rate to the collection of all rates.
			 */
			r = rate & IEEE80211_RATE_VAL;
			for (j = 0; j < allrates.rs_nrates; j++)
				if (allrates.rs_rates[j] == r)
					break;
			if (j == allrates.rs_nrates) {
				/* unique, add to the set */
				allrates.rs_rates[j] = r;
				allrates.rs_nrates++;
			}
			rate = (rate & IEEE80211_RATE_VAL) / 2;
			if (rate > maxrate)
				maxrate = rate;
		}
	}
	for (i = 0; i < allrates.rs_nrates; i++) {
		mword = ieee80211_rate2media(ic, allrates.rs_rates[i],
				IEEE80211_MODE_AUTO);
		if (mword == 0)
			continue;
		mword = IFM_SUBTYPE(mword);	/* remove media options */
		ADD(ic, mword, 0);
		if (ic->ic_caps & IEEE80211_C_IBSS)
			ADD(ic, mword, IFM_IEEE80211_ADHOC);
		if (ic->ic_caps & IEEE80211_C_HOSTAP)
			ADD(ic, mword, IFM_IEEE80211_HOSTAP);
		if (ic->ic_caps & IEEE80211_C_AHDEMO)
			ADD(ic, mword, IFM_IEEE80211_ADHOC | IFM_FLAG0);
		if (ic->ic_caps & IEEE80211_C_MONITOR)
			ADD(ic, mword, IFM_IEEE80211_MONITOR);
	}
	ieee80211_media_status(ifp, &imr);
	ifmedia_set(&ic->ic_media, imr.ifm_active);

	if (maxrate)
		ifp->if_baudrate = IF_Mbps(maxrate);
#undef ADD
}
Example #27
File: octe.c Project: coyizumi/cs111
static int
octe_attach(device_t dev)
{
	struct ifnet *ifp;
	cvm_oct_private_t *priv;
	device_t child;
	unsigned qos;
	int error;

	priv = device_get_softc(dev);
	ifp = priv->ifp;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	if (priv->phy_id != -1) {
		if (priv->phy_device == NULL) {
			error = mii_attach(dev, &priv->miibus, ifp,
			    octe_mii_medchange, octe_mii_medstat,
			    BMSR_DEFCAPMASK, priv->phy_id, MII_OFFSET_ANY, 0);
			if (error != 0)
				device_printf(dev, "attaching PHYs failed\n");
		} else {
			child = device_add_child(dev, priv->phy_device, -1);
			if (child == NULL)
				device_printf(dev, "missing phy %u device %s\n", priv->phy_id, priv->phy_device);
		}
	}

	if (priv->miibus == NULL) {
		ifmedia_init(&priv->media, 0, octe_medchange, octe_medstat);

		ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * XXX
	 * We don't support programming the multicast filter right now, although it
	 * ought to be easy enough.  (Presumably it's just a matter of putting
	 * multicast addresses in the CAM?)
	 */
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
	ifp->if_init = octe_init;
	ifp->if_ioctl = octe_ioctl;

	priv->if_flags = ifp->if_flags;

	mtx_init(&priv->tx_mtx, ifp->if_xname, "octe tx send queue", MTX_DEF);

	for (qos = 0; qos < 16; qos++) {
		mtx_init(&priv->tx_free_queue[qos].ifq_mtx, ifp->if_xname, "octe tx free queue", MTX_DEF);
		IFQ_SET_MAXLEN(&priv->tx_free_queue[qos], MAX_OUT_QUEUE_DEPTH);
	}

	ether_ifattach(ifp, priv->mac);

	ifp->if_transmit = octe_transmit;

	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP;

	OCTE_TX_LOCK(priv);
	IFQ_SET_MAXLEN(&ifp->if_snd, MAX_OUT_QUEUE_DEPTH);
	ifp->if_snd.ifq_drv_maxlen = MAX_OUT_QUEUE_DEPTH;
	IFQ_SET_READY(&ifp->if_snd);
	OCTE_TX_UNLOCK(priv);

	return (bus_generic_attach(dev));
}
Example #28
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}
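
The media callbacks handed to ifmedia_init() above are, in drivers of this style, thin wrappers around the MII layer. A minimal generic sketch of that shape follows; it is not the actual nfe(4) implementation, which also handles the no-PHY manual-media fallback configured above.

int
nfe_ifmedia_upd_sketch(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	/* Ask the attached PHY (if any) to switch to the selected media. */
	return (mii_mediachg(&sc->sc_mii));
}

void
nfe_ifmedia_sts_sketch(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;

	/* Refresh and report the current link state from the PHY. */
	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}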
Example #29
0
void
mec_attach(struct device *parent, struct device *self, void *aux)
{
	struct mec_softc *sc = (void *)self;
	struct confargs *ca = aux;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t command;
	struct mii_softc *child;
	bus_dma_segment_t seg;
	int i, err, rseg;

	sc->sc_st = ca->ca_iot;
	if (bus_space_map(sc->sc_st, ca->ca_baseaddr, MEC_NREGS, 0,
	    &sc->sc_sh) != 0) {
		printf(": can't map i/o space\n");
		return;
	}

	/* Set up DMA structures. */
	sc->sc_dmat = ca->ca_dmat;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((err = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	/*
	 * XXX needs re-think...
	 * The control data structures contain the whole RX data buffer, so
	 * BUS_DMA_COHERENT (which disables caching) may hurt performance when
	 * copying data from the RX buffer to an mbuf in normal memory.  If we
	 * leave the mapping cacheable instead, we have to make sure all
	 * bus_dmamap_sync(9) ops are called properly (see the sync sketch
	 * after this example).
	 */
	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mec_control_data),
	    (caddr_t *)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}
	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));

	if ((err = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct mec_control_data), 1,
	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    err);
		goto fail_2;
	}
	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    err);
		goto fail_3;
	}

	/* Create TX buffer DMA maps. */
	for (i = 0; i < MEC_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	timeout_set(&sc->sc_tick_ch, mec_tick, sc);

	/* Use the Ethernet address from the ARCBIOS. */
	enaddr_aton(bios_enaddr, sc->sc_ac.ac_enaddr);

	/* Reset device. */
	mec_reset(sc);

	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);

	printf(": MAC-110 rev %d, address %s\n",
	    (command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* Done, now attach everything. */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mec_mii_readreg;
	sc->sc_mii.mii_writereg = mec_mii_writereg;
	sc->sc_mii.mii_statchg = mec_statchg;

	/* Set up PHY properties. */
	ifmedia_init(&sc->sc_mii.mii_media, 0, mec_mediachange,
	    mec_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
		sc->sc_phyaddr = child->mii_phy;
	}

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mec_ioctl;
	ifp->if_start = mec_start;
	ifp->if_watchdog = mec_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	IFQ_SET_MAXLEN(&ifp->if_snd, MEC_NTXDESC - 1);
	ether_ifattach(ifp);

	/* Establish interrupt handler. */
	macebus_intr_establish(NULL, ca->ca_intr, IST_EDGE, IPL_NET,
	    mec_intr, sc, sc->sc_dev.dv_xname);

	/* Set hook to stop interface on shutdown. */
	sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_4:
	for (i = 0; i < MEC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct mec_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
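
Because the control-data map above is left cacheable (BUS_DMA_COHERENT commented out), descriptor and RX-buffer accesses must be bracketed with bus_dmamap_sync(9). A minimal sketch of that bookkeeping follows; MEC_CDRXOFF() and struct mec_rxdesc are assumed names for the per-slot offset and descriptor type inside struct mec_control_data and are not shown in this example.

static void
mec_rxdesc_sync_sketch(struct mec_softc *sc, int i, int ops)
{
	/*
	 * Sync one RX slot of the control data block; MEC_CDRXOFF(i) is
	 * assumed to yield its byte offset within struct mec_control_data.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
	    MEC_CDRXOFF(i), sizeof(struct mec_rxdesc), ops);
}

The usual pattern is BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE before handing the slot to the chip, and BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE after the receive interrupt, before copying the data into an mbuf.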
Example #30
0
void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char     *intrstr = NULL;
	caddr_t         kva;
	bus_dma_segment_t seg;
	int             rseg;
	struct ifnet   *ifp;
	pcireg_t        memtype;
	bus_addr_t      memaddr;
	bus_size_t      memsize;
	int             pmreg;
	pcireg_t        pmode;
	int             error;
	int             i;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
		printf(": unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n",
			       pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);
	if (sc->bce_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for the ring descriptors.
	 * The receive and transmit rings cannot share the same
	 * 4k page, but both are allocated at once here (see the
	 * sketch after this example).
	 */
	/*
	 * XXX A full PAGE_SIZE per ring is wasteful; we only need
	 * 1KB + 1KB, but the 4k restriction above forces it.
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf(": unable to alloc space for ring descriptors, "
		       "error = %d\n", error);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf(": unable to map DMA buffers, error = %d\n",
		    error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->bce_ring_map))) {
		printf(": unable to create ring DMA map, error = %d\n",
		    error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf(": unable to load ring DMA map\n");
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) (kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) {
			printf(": unable to create tx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			printf(": unable to create rx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* MAC address */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));

	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	 /* MAGIC */

	/* enable traffic meter led mode */
	bce_mii_write((struct device *) sc, 1, 27,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bce_timeout, bce_tick, sc);
}
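
To make the ring allocation above concrete: the single 2 * PAGE_SIZE block is split into one page for the RX ring and one for the TX ring, and the per-descriptor TX maps created in bce_attach() are later loaded with outgoing mbufs. The fragment below is a hedged sketch of that use; BCE_DMA_RXADDR and BCE_DMA_TXADDR are placeholder register names, `idx' is a caller-chosen descriptor index, and the real bce(4) transmit path differs.

static void
bce_rings_sketch(struct bce_softc *sc, struct mbuf *m, int idx)
{
	bus_addr_t ring_pa = sc->bce_ring_map->dm_segs[0].ds_addr;

	/*
	 * The RX ring lives in the first page and the TX ring in the
	 * second, matching the kva split in bce_attach().  The register
	 * offsets below are placeholders, not real bce(4) symbols.
	 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    ring_pa);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    ring_pa + PAGE_SIZE);

	/*
	 * Load an outgoing mbuf chain into one of the per-descriptor TX
	 * maps created in bce_attach() and flush it for the chip.
	 */
	if (bus_dmamap_load_mbuf(sc->bce_dmatag,
	    sc->bce_cdata.bce_tx_map[idx], m, BUS_DMA_NOWAIT) != 0)
		return;
	bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[idx], 0,
	    sc->bce_cdata.bce_tx_map[idx]->dm_mapsize, BUS_DMASYNC_PREWRITE);
	sc->bce_cdata.bce_tx_chain[idx] = m;
}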