Example #1
0
void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char     *intrstr = NULL;
	caddr_t         kva;
	bus_dma_segment_t seg;
	int             rseg;
	struct ifnet   *ifp;
	pcireg_t        memtype;
	bus_addr_t      memaddr;
	bus_size_t      memsize;
	int             pmreg;
	pcireg_t        pmode;
	int             error;
	int             i;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
		printf(": unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n",
			       pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);
	if (sc->bce_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive and transmit rings cannot share the same
	 * 4k space; however, both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
	 * a full page each is used due to the limitation above.
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
				      &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf(": unable to alloc space for ring descriptors, "
		       "error = %d\n", error);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf(": unable to map DMA buffers, error = %d\n",
		    error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
				       &sc->bce_ring_map))) {
		printf(": unable to create ring DMA map, error = %d\n",
		    error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf(": unable to load ring DMA map\n");
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) (kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) {
			printf(": unable to create tx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			printf(": unable to create rx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* MAC address */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));

	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/*
	 * Enable the activity LED.
	 * XXX This should be in a PHY driver, but is not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	 /* MAGIC */

	/* enable traffic meter LED mode */
	bce_mii_write((struct device *) sc, 1, 27,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bce_timeout, bce_tick, sc);
}
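
For reference, an attach routine like this is normally registered with autoconf(9) through cfattach/cfdriver declarations. The lines below are only a minimal sketch of that glue, assuming a match routine named bce_probe that is not part of this excerpt; the driver's real declarations may differ.

/* Hypothetical autoconf glue; bce_probe is an assumed match function. */
int	bce_probe(struct device *, void *, void *);

struct cfattach bce_ca = {
	sizeof(struct bce_softc), bce_probe, bce_attach
};

struct cfdriver bce_cd = {
	NULL, "bce", DV_IFNET
};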
Example #2
0
/* initialize the interface */
int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	u_int32_t reg_win;
	int             error;
	int             i;

	/* Cancel any pending I/O. */
	bce_stop(ifp, 0);

	/* enable PCI interrupts, bursts, and prefetch */

	/* remap the pci registers to the Sonics config registers */

	/* save the current map, so it can be restored */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* set register window to Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);

	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);

	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);

	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;

	/* enable crc32 generation and set proper LED modes */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CRC32_ENAB | BCE_EMC_LED);

	/* clear the powerdown control bit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
	    ~BCE_EMC_PDOWN);

	/* setup DMA interrupt control */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24);	/* MAGIC */

	/* setup packet filter */
	bce_set_filter(ifp);

	/* set max frame length, account for possible VLAN tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);

	/* enable transmit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000);	/* MAGIC */

	/*
	 * Give the receive ring to the chip, and
	 * start the receive DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);		/* MAGIC */

	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if (sc->bce_cdata.bce_rx_chain[i] == NULL) {
			if ((error = bce_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx(%d) "
				    "mbuf, error = %d\n", sc->bce_dev.dv_xname,
				    i, error);
				bce_rxdrain(sc);
				return (error);
			}
		} else
			BCE_INIT_RXDESC(sc, i);
	}

	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);

	/* start the receive dma */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));

	/* set media */
	mii_mediachg(&sc->bce_mii);

	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	timeout_add(&sc->bce_timeout, hz);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
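
Most of the register updates in bce_init() follow the same bus_space read-modify-write pattern. The helpers below are only an illustrative sketch, not part of the driver, showing how that pattern could be factored out.

/*
 * Illustrative only, not part of the original driver: the read-modify-write
 * pattern used above for BCE_SBINTVEC, BCE_SPCI_TR2, BCE_MACCTL and
 * BCE_ENET_CTL.
 */
static inline void
bce_reg_setbits(struct bce_softc *sc, bus_size_t off, u_int32_t bits)
{
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, off,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, off) | bits);
}

static inline void
bce_reg_clrbits(struct bce_softc *sc, bus_size_t off, u_int32_t bits)
{
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, off,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, off) & ~bits);
}

With these, the MAC control update above would read, for example,
bce_reg_setbits(sc, BCE_MACCTL, BCE_EMC_CRC32_ENAB | BCE_EMC_LED).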
Example #3
0
static void
bce_attach(device_t parent, device_t self, void *aux)
{
	struct bce_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct bce_product *bp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char     *intrstr = NULL;
	uint32_t	command;
	pcireg_t	memtype, pmode;
	bus_addr_t	memaddr;
	bus_size_t	memsize;
	void		*kva;
	bus_dma_segment_t seg;
	int             error, i, pmreg, rseg;
	struct ifnet   *ifp;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->bce_dev = self;

	bp = bce_lookup(pa);
	KASSERT(bp != NULL);

	sc->bce_pa = *pa;

	/* BCM440x can only address 30 bits (1GB) */
	if (bus_dmatag_subregion(pa->pa_dmat, 0, (1 << 30),
	    &(sc->bce_dmatag), BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(self,
		    "WARNING: failed to restrict dma range,"
		    " falling back to parent bus dma range\n");
		sc->bce_dmatag = pa->pa_dmat;
	}

	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s\n", bp->bp_name);

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(self, "failed to enable memory mapping!\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
		    &sc->bce_bhandle, &memaddr, &memsize) == 0)
			break;
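		/* FALLTHROUGH */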
	default:
		aprint_error_dev(self, "unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, NULL)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error_dev(self,
			    "unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			aprint_normal_dev(self,
			    "waking up from power state D%d\n", pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc);

	if (sc->bce_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive and transmit rings cannot share the same
	 * 4k space; however, both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
	 * a full page each is used due to the limitation above.
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to alloc space for ring descriptors, error = %d\n",
		    error);
		return;
	}
	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to map DMA buffers, error = %d\n", error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->bce_ring_map))) {
		aprint_error_dev(self,
		    "unable to create ring DMA map, error = %d\n", error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) ((char *)kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->ethercom.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	ifp->if_stop = bce_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize our media structures and probe the MII. */

	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;

	sc->ethercom.ec_mii = &sc->bce_mii;
	ifmedia_init(&sc->bce_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG|MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);
	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_PHY) & 0x1f;
	/*
	 * Enable the activity LED.
	 * XXX This should be in a PHY driver, but is not currently.
	 */
	bce_mii_write(sc->bce_dev, 1, 26,	 /* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 26) & 0x7fff);	 /* MAGIC */
	/* enable traffic meter LED mode */
	bce_mii_write(sc->bce_dev, 1, 27,	 /* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	sc->enaddr[0] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET0);
	sc->enaddr[1] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET1);
	sc->enaddr[2] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET2);
	sc->enaddr[3] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET3);
	sc->enaddr[4] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET4);
	sc->enaddr[5] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET5);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->enaddr));
	ether_ifattach(ifp, sc->enaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
	callout_init(&sc->bce_timeout, 0);

	if (pmf_device_register(self, NULL, bce_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}
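
As with the OpenBSD variant above, this attach routine would be wired into autoconf(9). A minimal sketch of the NetBSD glue follows, assuming a match routine named bce_probe that is not shown in this excerpt; the real declaration may also pass detach and activate hooks.

/* Hypothetical autoconf glue; bce_probe is an assumed match function. */
static int	bce_probe(device_t, cfdata_t, void *);

CFATTACH_DECL_NEW(bce, sizeof(struct bce_softc),
    bce_probe, bce_attach, NULL, NULL);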