/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
int
snsetup(struct sn_softc	*sc, uint8_t *lladdr)
{
	struct ifnet *ifp = &sc->sc_if;
	uint8_t	*p;
	uint8_t	*pp;
	int	i;

	if (sc->space == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "memory allocation for descriptors failed\n");
		return 1;
	}

	/*
	 * Put the pup in reset mode (sninit() will fix it later),
	 * stop the timer, disable all interrupts and clear any interrupts.
	 */
	NIC_PUT(sc, SNR_CR, CR_STP);
	wbflush();
	NIC_PUT(sc, SNR_CR, CR_RST);
	wbflush();
	NIC_PUT(sc, SNR_IMR, 0);
	wbflush();
	NIC_PUT(sc, SNR_ISR, ISR_ALL);
	wbflush();

	/*
	 * Because the SONIC is basically a 16-bit device, it 'concatenates'
	 * a higher buffer address to a 16-bit offset -- this will cause
	 * wraparound problems near the end of 64k!
	 */
	p = sc->space;
	pp = (uint8_t *)roundup((int)p, PAGE_SIZE);
	p = pp;

	for (i = 0; i < NRRA; i++) {
		sc->p_rra[i] = (void *)p;
		sc->v_rra[i] = SONIC_GETDMA(p);
		p += RXRSRC_SIZE(sc);
	}
	sc->v_rea = SONIC_GETDMA(p);

	p = (uint8_t *)SOALIGN(sc, p);

	sc->p_cda = (void *)(p);
	sc->v_cda = SONIC_GETDMA(p);
	p += CDA_SIZE(sc);

	p = (uint8_t *)SOALIGN(sc, p);

	for (i = 0; i < NTDA; i++) {
		struct mtd *mtdp = &sc->mtda[i];
		mtdp->mtd_txp = (void *)p;
		mtdp->mtd_vtxp = SONIC_GETDMA(p);
		p += TXP_SIZE(sc);
	}

	p = (uint8_t *)SOALIGN(sc, p);

	if ((p - pp) > PAGE_SIZE) {
		aprint_error_dev(sc->sc_dev, "sizeof RRA (%ld) + CDA (%ld) + "
		    "TDA (%ld) > PAGE_SIZE (%d). Punt!\n",
		    (ulong)sc->p_cda - (ulong)sc->p_rra[0],
		    (ulong)sc->mtda[0].mtd_txp - (ulong)sc->p_cda,
		    (ulong)p - (ulong)sc->mtda[0].mtd_txp,
		    PAGE_SIZE);
		return 1;
	}

	p = pp + PAGE_SIZE;
	pp = p;

	sc->sc_nrda = PAGE_SIZE / RXPKT_SIZE(sc);
	sc->p_rda = (void *)p;
	sc->v_rda = SONIC_GETDMA(p);

	p = pp + PAGE_SIZE;

	for (i = 0; i < NRBA; i++) {
		sc->rbuf[i] = (void *)p;
		p += PAGE_SIZE;
	}

	pp = p;
	for (i = 0; i < NTDA; i++) {
		struct mtd *mtdp = &sc->mtda[i];

		mtdp->mtd_buf = p;
		mtdp->mtd_vbuf = SONIC_GETDMA(p);
		p += TXBSIZE;
	}

#ifdef SNDEBUG
	camdump(sc);
#endif
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(lladdr));

#ifdef SNDEBUG
	aprint_debug_dev(sc->sc_dev, "buffers: rra=%p cda=%p rda=%p tda=%p\n",
	    sc->p_rra[0], sc->p_cda, sc->p_rda, sc->mtda[0].mtd_txp);
#endif

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_ioctl = snioctl;
	ifp->if_start = snstart;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_watchdog = snwatchdog;
	if_attach(ifp);
	ether_ifattach(ifp, lladdr);

	return 0;
}
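
The layout code above leans on rounding the descriptor area up to a page boundary so the RRA, CDA and TDA all fit in one page and stay clear of the SONIC's 16-bit/64k wraparound. A minimal stand-alone sketch of that roundup arithmetic (PAGE_SIZE and the roundup() macro are assumed here, mirroring the usual sys/param.h definitions):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u				/* assumed page size */
/* roundup(x, y): smallest multiple of y that is >= x (as in sys/param.h) */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	uintptr_t raw = 0x12345;			/* arbitrary buffer start */
	uintptr_t aligned = roundup(raw, PAGE_SIZE);	/* page-aligned start */

	printf("raw=%#lx aligned=%#lx\n",
	    (unsigned long)raw, (unsigned long)aligned);
	/* aligned is now a multiple of PAGE_SIZE, so descriptors packed
	 * within one page cannot straddle a 64k boundary. */
	return 0;
}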
Example #2
	 */
	memset(sc->sc_ac.ac_enaddr, 0xff, ETHER_ADDR_LEN);
	
	/* Check if there is already a MAC address in the register */
	if ((smsc_read_reg(sc, SMSC_MAC_ADDRL, &mac_l) == 0) &&
	    (smsc_read_reg(sc, SMSC_MAC_ADDRH, &mac_h) == 0)) {
		sc->sc_ac.ac_enaddr[5] = (uint8_t)((mac_h >> 8) & 0xff);
		sc->sc_ac.ac_enaddr[4] = (uint8_t)((mac_h) & 0xff);
		sc->sc_ac.ac_enaddr[3] = (uint8_t)((mac_l >> 24) & 0xff);
		sc->sc_ac.ac_enaddr[2] = (uint8_t)((mac_l >> 16) & 0xff);
		sc->sc_ac.ac_enaddr[1] = (uint8_t)((mac_l >> 8) & 0xff);
		sc->sc_ac.ac_enaddr[0] = (uint8_t)((mac_l) & 0xff);
	}
	
	printf("%s: address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_ac.ac_enaddr));
	
	/* Initialise the chip for the first time */
	smsc_chip_init(sc);

	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize MII/media info. */
	mii = &sc->sc_mii;
	mii->mii_ifp = ifp;
	mii->mii_readreg = smsc_miibus_readreg;
	mii->mii_writereg = smsc_miibus_writereg;
	mii->mii_statchg = smsc_miibus_statchg;
	mii->mii_flags = MIIF_AUTOTSLEEP;

	ifmedia_init(&mii->mii_media, 0, smsc_ifmedia_upd, smsc_ifmedia_sts);
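
The fragment above reassembles the station address from the SMSC MAC_ADDRL/MAC_ADDRH registers, least-significant byte first. A small stand-alone sketch of that unpacking, with made-up register contents purely for illustration:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* assumed example register contents: MAC 00:11:22:33:44:55 */
	uint32_t mac_l = 0x33221100;	/* address bytes 0..3 */
	uint32_t mac_h = 0x00005544;	/* address bytes 4..5 */
	uint8_t addr[6];

	addr[0] = (uint8_t)(mac_l & 0xff);
	addr[1] = (uint8_t)((mac_l >> 8) & 0xff);
	addr[2] = (uint8_t)((mac_l >> 16) & 0xff);
	addr[3] = (uint8_t)((mac_l >> 24) & 0xff);
	addr[4] = (uint8_t)(mac_h & 0xff);
	addr[5] = (uint8_t)((mac_h >> 8) & 0xff);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}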
Example #3
const char*
ath_hal_ether_sprintf(const u_int8_t *mac)
{
    return ether_sprintf(mac);
}
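
This wrapper just forwards to ether_sprintf(), which renders a 6-byte MAC address into a static colon-separated string. A hedged user-space sketch of that behaviour (an illustration, not the kernel implementation):

#include <stdio.h>

/* Illustrative only: format a 6-byte MAC into a static buffer,
 * the way the kernel's ether_sprintf() does. */
static const char *
example_ether_sprintf(const unsigned char *mac)
{
	static char buf[18];	/* "xx:xx:xx:xx:xx:xx" + NUL */

	snprintf(buf, sizeof(buf), "%02x:%02x:%02x:%02x:%02x:%02x",
	    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return buf;
}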
Example #4
/*
 * Called by devopen after it sets f->f_dev to our devsw entry.
 * This opens the low-level device and sets f->f_devdata.
 * This is declared with variable arguments...
 */
static int
net_open(struct open_file *f, ...)
{
	struct iodesc *d;
	va_list args;
	char *devname;		/* Device part of file name (or NULL). */
	int error = 0;

	va_start(args, f);
	devname = va_arg(args, char*);
	va_end(args);

#ifdef	NETIF_OPEN_CLOSE_ONCE
	/* Before opening another interface, close the previous one first. */
	if (netdev_sock >= 0 && strcmp(devname, netdev_name) != 0)
		net_cleanup();
#endif

	/* On first open, do netif open, mount, etc. */
	if (netdev_opens == 0) {
		/* Find network interface. */
		if (netdev_sock < 0) {
			netdev_sock = netif_open(devname);
			if (netdev_sock < 0) {
				printf("net_open: netif_open() failed\n");
				return (ENXIO);
			}
			netdev_name = strdup(devname);
#ifdef	NETIF_DEBUG
			if (debug)
				printf("net_open: netif_open() succeeded\n");
#endif
		}
		/*
		 * If network params were not set by netif_open(), try to get
		 * them via bootp, rarp, etc.
		 */
		if (rootip.s_addr == 0) {
			/* Get root IP address, and path, etc. */
			error = net_getparams(netdev_sock);
			if (error) {
				/* getparams makes its own noise */
				free(netdev_name);
				netif_close(netdev_sock);
				netdev_sock = -1;
				return (error);
			}
		}
		/*
		 * Set the variables required by the kernel's nfs_diskless
		 * mechanism.  This is the minimum set of variables required to
		 * mount a root filesystem without needing to obtain additional
		 * info from bootp or other sources.
		 */
		d = socktodesc(netdev_sock);
		setenv("boot.netif.hwaddr", ether_sprintf(d->myea), 1);
		setenv("boot.netif.ip", inet_ntoa(myip), 1);
		setenv("boot.netif.netmask", intoa(netmask), 1);
		setenv("boot.netif.gateway", inet_ntoa(gateip), 1);
		setenv("boot.netif.server", inet_ntoa(rootip), 1);
		if (netproto == NET_TFTP) {
			setenv("boot.tftproot.server", inet_ntoa(rootip), 1);
			setenv("boot.tftproot.path", rootpath, 1);
		} else if (netproto == NET_NFS) {
			setenv("boot.nfsroot.server", inet_ntoa(rootip), 1);
			setenv("boot.nfsroot.path", rootpath, 1);
		}
		if (intf_mtu != 0) {
			char mtu[16];
			sprintf(mtu, "%u", intf_mtu);
			setenv("boot.netif.mtu", mtu, 1);
		}

	}
	netdev_opens++;
	f->f_devdata = &netdev_sock;
	return (error);
}
/* 
 * Examine and potentially adjust the transmit rate.
 */
static void
ath_rate_ctl(void *arg, struct ieee80211_node *ni)
{
	struct ath_softc *sc = arg;
	struct onoe_node *on = ATH_NODE_ONOE(ATH_NODE(ni));
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int dir = 0, nrate, enough;

	/*
	 * Rate control
	 * XXX: very primitive version.
	 */
	enough = (on->on_tx_ok + on->on_tx_err >= 10);

	/* no packet reached -> down */
	if (on->on_tx_err > 0 && on->on_tx_ok == 0)
		dir = -1;

	/* all packets need retry on average -> down */
	if (enough && on->on_tx_ok < on->on_tx_retr)
		dir = -1;

	/* no error and less than rate_raise% of packets need retry -> up */
	if (enough && on->on_tx_err == 0 &&
	    on->on_tx_retr < (on->on_tx_ok * ath_rate_raise) / 100)
		dir = 1;

	DPRINTF(sc, "%s: ok %d err %d retr %d upper %d dir %d\n",
		ether_sprintf(ni->ni_macaddr),
		on->on_tx_ok, on->on_tx_err, on->on_tx_retr,
		on->on_tx_upper, dir);

	nrate = ni->ni_txrate;
	switch (dir) {
	case 0:
		if (enough && on->on_tx_upper > 0)
			on->on_tx_upper--;
		break;
	case -1:
		if (nrate > 0) {
			nrate--;
			sc->sc_stats.ast_rate_drop++;
		}
		on->on_tx_upper = 0;
		break;
	case 1:
		/* raise rate if we hit rate_raise_threshold */
		if (++on->on_tx_upper < ath_rate_raise_threshold)
			break;
		on->on_tx_upper = 0;
		if (nrate + 1 < rs->rs_nrates) {
			nrate++;
			sc->sc_stats.ast_rate_raise++;
		}
		break;
	}

	if (nrate != ni->ni_txrate) {
		DPRINTF(sc, "%s: %dM -> %dM (%d ok, %d err, %d retr)\n",
		    __func__,
		    (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL) / 2,
		    (rs->rs_rates[nrate] & IEEE80211_RATE_VAL) / 2,
		    on->on_tx_ok, on->on_tx_err, on->on_tx_retr);
		ath_rate_update(sc, ni, nrate);
	} else if (enough)
		on->on_tx_ok = on->on_tx_err = on->on_tx_retr = 0;
}
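
The onoe rate control above reduces to a three-way decision from the ok/err/retry counters. A condensed stand-alone sketch of just that decision; the sample threshold of 10 and the rate_raise percentage mirror the driver's tunables:

/*
 * Illustrative decision helper: returns -1 (step down), +1 (step up)
 * or 0 (hold), using the same rules as ath_rate_ctl() above.
 */
static int
onoe_decide(int tx_ok, int tx_err, int tx_retr, int rate_raise_pct)
{
	int enough = (tx_ok + tx_err >= 10);	/* enough samples? */

	if (tx_err > 0 && tx_ok == 0)
		return -1;			/* nothing got through */
	if (enough && tx_ok < tx_retr)
		return -1;			/* retries dominate */
	if (enough && tx_err == 0 &&
	    tx_retr < (tx_ok * rate_raise_pct) / 100)
		return 1;			/* clean enough to try faster */
	return 0;
}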
void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char     *intrstr = NULL;
	caddr_t         kva;
	bus_dma_segment_t seg;
	int             rseg;
	struct ifnet   *ifp;
	pcireg_t        memtype;
	bus_addr_t      memaddr;
	bus_size_t      memsize;
	int             pmreg;
	pcireg_t        pmode;
	int             error;
	int             i;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
		printf(": unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n",
			       pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);
	if (sc->bce_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for the ring descriptors.
	 * The receive and transmit rings cannot share the same 4k space,
	 * but both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but we are
	 * stuck with that due to the limitation above.
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
				      &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf(": unable to alloc space for ring descriptors, "
		       "error = %d\n", error);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf(": unable to map DMA buffers, error = %d\n",
		    error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
				       &sc->bce_ring_map))) {
		printf(": unable to create ring DMA map, error = %d\n",
		    error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf(": unable to load ring DMA map\n");
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) (kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) {
			printf(": unable to create tx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			printf(": unable to create rx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* MAC address */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));

	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	 /* MAGIC */

	/* enable traffic meter led mode */
	bce_mii_write((struct device *) sc, 1, 27,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bce_timeout, bce_tick, sc);
}
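
bce_attach() reads the PCI power-management state from the low two bits of the PM control/status register before touching the chip. A tiny stand-alone sketch of that decoding, with an assumed register value:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t pmcsr = 0x0001;	/* assumed register value: D1 */
	unsigned pmode = pmcsr & 0x3;	/* bits 1:0 hold the power state */

	if (pmode == 3)
		printf("device is in D3; configuration lost, give up\n");
	else if (pmode != 0)
		printf("waking up from power state D%u\n", pmode);
	else
		printf("already in D0\n");
	return 0;
}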
Example #7
int wlan_mlme_deauth_request(wlan_if_t vaphandle, u_int8_t *macaddr, IEEE80211_REASON_CODE reason)
{
    struct ieee80211vap      *vap = vaphandle;
    struct ieee80211_node    *ni;
    int                      error = 0;

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s\n", __func__);

    /*
     * If a node already exists with the given address, use it;
     * if not, use the bss node.
     */
    ni = ieee80211_vap_find_node(vap, macaddr);
    if (ni == NULL) {
        if (IEEE80211_ADDR_EQ(macaddr, IEEE80211_GET_BCAST_ADDR(vap->iv_ic)))
            ni = ieee80211_ref_node(vap->iv_bss);
        else{
            error = -EIO;
            goto exit;
        }
    }

    /* Send deauth frame */
    error = ieee80211_send_deauth(ni, reason);

    /* Node needs to be removed from table as well, do it only for AP/IBSS now */
#if ATH_SUPPORT_IBSS
    if ((vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) && ni != vap->iv_bss) {
#else
    if (vap->iv_opmode == IEEE80211_M_HOSTAP && ni != vap->iv_bss) {
#if ATH_SUPPORT_AOW
        ieee80211_aow_join_indicate(ni->ni_ic, AOW_STA_DISCONNECTED, ni);
#endif  /* ATH_SUPPORT_AOW */
#endif  /* ATH_SUPPORT_IBSS */
        IEEE80211_NODE_LEAVE(ni);
    }        

    /* claim node immediately */
    ieee80211_free_node(ni);

    if (error) {
        goto exit;
    }

    /* 
     * Call MLME confirmation handler => mlme_deauth_complete 
     * This should reflect the tx completion status of the deauth frame,
     * but since we don't have per frame completion, we'll always indicate success here. 
     */
    IEEE80211_DELIVER_EVENT_MLME_DEAUTH_COMPLETE(vap,macaddr, IEEE80211_STATUS_SUCCESS); 

exit:
    return error;
}

int wlan_mlme_disassoc_request(wlan_if_t vaphandle, u_int8_t *macaddr, IEEE80211_REASON_CODE reason)
{
    struct ieee80211vap      *vap = vaphandle;
    struct ieee80211_node    *ni;
    int                      error = 0;

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s\n", __func__);

    /* Broadcast Addr - disassociate all stations */
    if (IEEE80211_ADDR_EQ(macaddr, IEEE80211_GET_BCAST_ADDR(vap->iv_ic))) {
        if (vap->iv_opmode == IEEE80211_M_STA) {
            IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME,
                              "%s: unexpected station vap with all 0xff mac address", __func__);
            ASSERT(0);
            goto exit;
		} else {
            /* Iterate station list only when PMF is not enabled */
            if (!wlan_vap_is_pmf_enabled(vap)) {
                wlan_iterate_station_list(vap, sta_disassoc, NULL);
                goto exit;
            }
        }
    }

    /*
     * If a node already exists with the given address, use it;
     * if not, use the bss node.
     */
    ni = ieee80211_vap_find_node(vap, macaddr);
    if (ni == NULL) {
        if (IEEE80211_ADDR_EQ(macaddr, IEEE80211_GET_BCAST_ADDR(vap->iv_ic)))
            ni = ieee80211_ref_node(vap->iv_bss);
        else{
            error = -EIO;
            goto exit;
        }
    }

    /* Send disassoc frame */
    error = ieee80211_send_disassoc(ni, reason);

    /* Node needs to be removed from table as well, do it only for AP now */
    if (vap->iv_opmode == IEEE80211_M_HOSTAP  && ni != vap->iv_bss) {
#if ATH_SUPPORT_AOW
        ieee80211_aow_join_indicate(ni->ni_ic, AOW_STA_DISCONNECTED, ni);
#endif  /* ATH_SUPPORT_AOW */
        IEEE80211_NODE_LEAVE(ni);
    }        

    /* claim node immediately */
    ieee80211_free_node(ni);

    if (error) {
        goto exit;
    }

    /* 
     * Call MLME confirmation handler => mlme_disassoc_complete 
     * This should reflect the tx completion status of the disassoc frame,
     * but since we don't have per frame completion, we'll always indicate success here. 
     */
    IEEE80211_DELIVER_EVENT_MLME_DISASSOC_COMPLETE(vap, macaddr, reason, IEEE80211_STATUS_SUCCESS); 

exit:
    return error;
}
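
Both MLME requests above special-case the all-0xff broadcast address before looking the peer up. A minimal user-space sketch of that check, with memcmp standing in for the driver's IEEE80211_ADDR_EQ macro:

#include <string.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for IEEE80211_ADDR_EQ(macaddr, broadcast). */
static bool
is_broadcast_addr(const uint8_t mac[6])
{
	static const uint8_t bcast[6] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(mac, bcast, sizeof(bcast)) == 0;
}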


int wlan_mlme_start_bss(wlan_if_t vaphandle)
{
    struct ieee80211vap           *vap = vaphandle;
    struct ieee80211_mlme_priv    *mlme_priv = vap->iv_mlme_priv;
    int                           error = 0;

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s\n", __func__);

    switch(vap->iv_opmode) {
    case IEEE80211_M_IBSS:
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s: Create adhoc bss\n", __func__);
        /* Reset state */
        mlme_priv->im_connection_up = 0;

        error = mlme_create_adhoc_bss(vap);
        break;
    case IEEE80211_M_MONITOR:
    case IEEE80211_M_HOSTAP:
    case IEEE80211_M_BTAMP:
        /*
         * Start the AP. The channel/ssid should have been set up already.
         */
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s: Create infrastructure(AP) bss\n", __func__);
        error = mlme_create_infra_bss(vap);
        break;
    default:
        ASSERT(0);
    }

    return error;
}

bool wlan_coext_enabled(wlan_if_t vaphandle)
{
    struct ieee80211com    *ic = vaphandle->iv_ic;

    return (ic->ic_flags & IEEE80211_F_COEXT_DISABLE) ? FALSE : TRUE;
}

void wlan_determine_cw(wlan_if_t vaphandle, wlan_chan_t channel)
{
    struct ieee80211com    *ic = vaphandle->iv_ic;
    int is_chan_ht40 = channel->ic_flags & (IEEE80211_CHAN_11NG_HT40PLUS |
                                            IEEE80211_CHAN_11NG_HT40MINUS);

    if (is_chan_ht40 && (channel->ic_flags & IEEE80211_CHAN_HT40INTOL)) {
        ic->ic_bss_to20(ic);
    }
}


static void
sta_deauth(void *arg, struct ieee80211_node *ni)
{
    struct ieee80211vap    *vap = ni->ni_vap;
    u_int8_t macaddr[6];

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s: deauth station %s \n",
                      __func__,ether_sprintf(ni->ni_macaddr));
    IEEE80211_ADDR_COPY(macaddr, ni->ni_macaddr);
    if (ni->ni_associd) {
        /*
         * if it is associated, then send disassoc.
         */
        ieee80211_send_deauth(ni, IEEE80211_REASON_AUTH_LEAVE);
    }
    IEEE80211_NODE_LEAVE(ni);
    IEEE80211_DELIVER_EVENT_MLME_DEAUTH_INDICATION(vap, macaddr, IEEE80211_REASON_AUTH_LEAVE);
}

static void
sta_disassoc(void *arg, struct ieee80211_node *ni)
{
    struct ieee80211vap    *vap = ni->ni_vap;
    u_int8_t macaddr[6];

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s: disassoc station %s \n",
                      __func__,ether_sprintf(ni->ni_macaddr));
    IEEE80211_ADDR_COPY(macaddr, ni->ni_macaddr);
    if (ni->ni_associd) {
        /*
         * if it is associated, then send disassoc.
         */
        ieee80211_send_disassoc(ni, IEEE80211_REASON_ASSOC_LEAVE);
#if ATH_SUPPORT_AOW
        ieee80211_aow_join_indicate(ni->ni_ic, AOW_STA_DISCONNECTED, ni);
#endif  /* ATH_SUPPORT_AOW */
    }
    IEEE80211_NODE_LEAVE(ni);
    IEEE80211_DELIVER_EVENT_MLME_DISASSOC_COMPLETE(vap, macaddr, 
                                                     IEEE80211_REASON_ASSOC_LEAVE, IEEE80211_STATUS_SUCCESS); 
}


int wlan_mlme_stop_bss(wlan_if_t vaphandle, int flags)
{
#define WAIT_RX_INTERVAL 10000
    u_int32_t                       elapsed_time = 0;
    struct ieee80211vap             *vap = vaphandle;
    struct ieee80211_mlme_priv      *mlme_priv = vap->iv_mlme_priv;
    int                             error = 0;

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s flags = 0x%x\n", __func__, flags);

    /*
     * Wait for current rx path to finish. Assume only one rx thread.
     */
    if (flags & WLAN_MLME_STOP_BSS_F_WAIT_RX_DONE) {
        do {
            if (OS_ATOMIC_CMPXCHG(&vap->iv_rx_gate, 0, 1) == 0) {
                break;
            }

            OS_SLEEP(WAIT_RX_INTERVAL);
            elapsed_time += WAIT_RX_INTERVAL;

            if (elapsed_time > (100 * WAIT_RX_INTERVAL))
               ieee80211_note (vap,"%s: Rx pending count stuck. Investigate!!!\n", __func__);
        } while (1);
    }

    switch(vap->iv_opmode) {
#if UMAC_SUPPORT_IBSS
    case IEEE80211_M_IBSS:
        mlme_stop_adhoc_bss(vap, flags);
        break;
#endif
    case IEEE80211_M_HOSTAP:
    case IEEE80211_M_BTAMP:
        /* disassoc/deauth all stations */
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s: disassoc/deauth all stations\n", __func__);
        if(vap->iv_send_deauth)
            wlan_iterate_station_list(vap, sta_deauth, NULL);
        else
            wlan_iterate_station_list(vap, sta_disassoc, NULL);
        break;

    case IEEE80211_M_STA:
        /* There should be no mlme requests pending */
        ASSERT(vap->iv_mlme_priv->im_request_type == MLME_REQ_NONE);

        /* Reset state variables */
        mlme_priv->im_connection_up = 0;
        mlme_sta_swbmiss_timer_stop(vap);
        ieee80211_sta_leave(vap->iv_bss);
        break;

    default:
        break;
    }

    if (flags & WLAN_MLME_STOP_BSS_F_FORCE_STOP_RESET) {
        /* put vap in init state */
        ieee80211_vap_stop(vap, TRUE);
    } else {
        /* put vap in stopping state */
        if (flags & WLAN_MLME_STOP_BSS_F_STANDBY)
            ieee80211_vap_standby(vap);
        else
            ieee80211_vap_stop(vap, FALSE);
    }

    if (!(flags & WLAN_MLME_STOP_BSS_F_NO_RESET))
        error = ieee80211_reset_bss(vap);

    /*
     * Release the rx mutex.
     */
    if (flags & WLAN_MLME_STOP_BSS_F_WAIT_RX_DONE) {
        (void) OS_ATOMIC_CMPXCHG(&vap->iv_rx_gate, 1, 0);
    }

    return error;
#undef WAIT_RX_INTERVAL
}
/*
 * Send 4-Way Handshake Message 3 to the supplicant.
 */
int
ieee80211_send_4way_msg3(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct ieee80211_eapol_key *key;
	struct ieee80211_key *k = NULL;
	struct mbuf *m;
	u_int16_t info, keylen;
	u_int8_t *frm;

	ni->ni_rsn_state = RSNA_PTKINITNEGOTIATING;
	if (++ni->ni_rsn_retries > 3) {
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_4WAY_TIMEOUT);
		ieee80211_node_leave(ic, ni);
		return 0;
	}
	if (ni->ni_rsnprotos == IEEE80211_PROTO_RSN) {
		k = &ic->ic_nw_keys[ic->ic_def_txkey];
		m = ieee80211_get_eapol_key(M_DONTWAIT, MT_DATA,
		    2 + IEEE80211_RSNIE_MAXLEN + 2 + 6 + k->k_len + 15 +
		    ((ni->ni_flags & IEEE80211_NODE_MFP) ? 2 + 28 : 0));
	} else { /* WPA */
		m = ieee80211_get_eapol_key(M_DONTWAIT, MT_DATA,
		    2 + IEEE80211_WPAIE_MAXLEN +
		    ((ni->ni_flags & IEEE80211_NODE_MFP) ? 2 + 28 : 0));
	}
	if (m == NULL)
		return ENOMEM;
	key = mtod(m, struct ieee80211_eapol_key *);
	memset(key, 0, sizeof(*key));

	info = EAPOL_KEY_PAIRWISE | EAPOL_KEY_KEYACK | EAPOL_KEY_KEYMIC;
	if (ni->ni_rsncipher != IEEE80211_CIPHER_USEGROUP)
		info |= EAPOL_KEY_INSTALL;

	/* use same nonce as in Message 1 */
	memcpy(key->nonce, ni->ni_nonce, EAPOL_KEY_NONCE_LEN);

	ni->ni_replaycnt++;
	BE_WRITE_8(key->replaycnt, ni->ni_replaycnt);

	keylen = ieee80211_cipher_keylen(ni->ni_rsncipher);
	BE_WRITE_2(key->keylen, keylen);

	frm = (u_int8_t *)&key[1];
	/* add the WPA/RSN IE included in Beacon/Probe Response */
	if (ni->ni_rsnprotos == IEEE80211_PROTO_RSN) {
		frm = ieee80211_add_rsn(frm, ic, ic->ic_bss);
		/* encapsulate the GTK */
		frm = ieee80211_add_gtk_kde(frm, ni, k);
		LE_WRITE_6(key->rsc, k->k_tsc);
		/* encapsulate the IGTK if MFP was negotiated */
		if (ni->ni_flags & IEEE80211_NODE_MFP) {
			frm = ieee80211_add_igtk_kde(frm,
			    &ic->ic_nw_keys[ic->ic_igtk_kid]);
		}
		/* ask that the EAPOL-Key frame be encrypted */
		info |= EAPOL_KEY_ENCRYPTED | EAPOL_KEY_SECURE;
	} else	/* WPA */
		frm = ieee80211_add_wpa(frm, ic, ic->ic_bss);

	/* write the key info field */
	BE_WRITE_2(key->info, info);

	m->m_pkthdr.len = m->m_len = frm - (u_int8_t *)key;

	if (ic->ic_if.if_flags & IFF_DEBUG)
		printf("%s: sending msg %d/%d of the %s handshake to %s\n",
		    ic->ic_if.if_xname, 3, 4, "4-way",
		    ether_sprintf(ni->ni_macaddr));

	return ieee80211_send_eapol_key(ic, m, ni, &ni->ni_ptk);
}
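
The EAPOL-Key fields above are serialized big-endian through the BE_WRITE_* helpers. A stand-alone sketch of the 16-bit and 64-bit variants, written out as functions rather than the net80211 macros:

#include <stdint.h>

/* Store a 16-bit value big-endian, as BE_WRITE_2() does. */
static void
be_write_2(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)(v >> 8);
	p[1] = (uint8_t)(v & 0xff);
}

/* Store a 64-bit value big-endian, as BE_WRITE_8() does. */
static void
be_write_8(uint8_t *p, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (56 - 8 * i));
}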
/*
 * Send Group Key Handshake Message 1 to the supplicant.
 */
int
ieee80211_send_group_msg1(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct ieee80211_eapol_key *key;
	const struct ieee80211_key *k;
	struct mbuf *m;
	u_int16_t info;
	u_int8_t *frm;
	u_int8_t kid;

	ni->ni_rsn_gstate = RSNA_REKEYNEGOTIATING;
	if (++ni->ni_rsn_retries > 3) {
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_GROUP_TIMEOUT);
		ieee80211_node_leave(ic, ni);
		return 0;
	}
	if (ni->ni_flags & IEEE80211_NODE_REKEY)
		kid = (ic->ic_def_txkey == 1) ? 2 : 1;
	else
		kid = ic->ic_def_txkey;
	k = &ic->ic_nw_keys[kid];

	m = ieee80211_get_eapol_key(M_DONTWAIT, MT_DATA,
	    ((ni->ni_rsnprotos == IEEE80211_PROTO_WPA) ?
		k->k_len : 2 + 6 + k->k_len) +
	    ((ni->ni_flags & IEEE80211_NODE_MFP) ? 2 + 28 : 0) +
	    15);
	if (m == NULL)
		return ENOMEM;
	key = mtod(m, struct ieee80211_eapol_key *);
	memset(key, 0, sizeof(*key));

	info = EAPOL_KEY_KEYACK | EAPOL_KEY_KEYMIC | EAPOL_KEY_SECURE |
	    EAPOL_KEY_ENCRYPTED;

	ni->ni_replaycnt++;
	BE_WRITE_8(key->replaycnt, ni->ni_replaycnt);

	frm = (u_int8_t *)&key[1];
	if (ni->ni_rsnprotos == IEEE80211_PROTO_WPA) {
		/* WPA does not have GTK KDE */
		BE_WRITE_2(key->keylen, k->k_len);
		memcpy(frm, k->k_key, k->k_len);
		frm += k->k_len;
		info |= (k->k_id & 0x3) << EAPOL_KEY_WPA_KID_SHIFT;
		if (ni->ni_rsncipher == IEEE80211_CIPHER_USEGROUP)
			info |= EAPOL_KEY_WPA_TX;
	} else {	/* RSN */
		frm = ieee80211_add_gtk_kde(frm, ni, k);
		if (ni->ni_flags & IEEE80211_NODE_MFP) {
			if (ni->ni_flags & IEEE80211_NODE_REKEY)
				kid = (ic->ic_igtk_kid == 4) ? 5 : 4;
			else
				kid = ic->ic_igtk_kid;
			frm = ieee80211_add_igtk_kde(frm,
			    &ic->ic_nw_keys[kid]);
		}
	}
	/* RSC = last transmit sequence number for the GTK */
	LE_WRITE_6(key->rsc, k->k_tsc);

	/* write the key info field */
	BE_WRITE_2(key->info, info);

	m->m_pkthdr.len = m->m_len = frm - (u_int8_t *)key;

	if (ic->ic_if.if_flags & IFF_DEBUG)
		printf("%s: sending msg %d/%d of the %s handshake to %s\n",
		    ic->ic_if.if_xname, 1, 2, "group key",
		    ether_sprintf(ni->ni_macaddr));

	return ieee80211_send_eapol_key(ic, m, ni, &ni->ni_ptk);
}
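
While rekeying, the group-key handshake above alternates the GTK between key ids 1 and 2 (and the IGTK between 4 and 5). A tiny sketch of that id-flipping rule:

/* Pick the group key id to install: alternate between 1 and 2 while
 * rekeying, otherwise keep the current default tx key (mirrors the
 * selection logic above). */
static int
next_gtk_kid(int def_txkey, int rekeying)
{
	if (rekeying)
		return (def_txkey == 1) ? 2 : 1;
	return def_txkey;
}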
Example #10
/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	sc->sc_dev = self;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    device_xname(sc->sc_dev));
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(device_xname(sc->sc_dev),
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    device_xname(sc->sc_dev));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}
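
ae_attach() uses the classic cascading goto-label cleanup: each failure jumps to a label that releases only what was already allocated and falls through the earlier labels. A skeletal sketch of the pattern with hypothetical resources:

#include <stdlib.h>

/* Skeleton of the fail_N unwind pattern; the resources are hypothetical. */
static int
attach_pattern(void)
{
	void *a = NULL, *b = NULL, *c = NULL;

	if ((a = malloc(64)) == NULL)
		goto fail_0;
	if ((b = malloc(64)) == NULL)
		goto fail_1;
	if ((c = malloc(64)) == NULL)
		goto fail_2;

	/* ... attached successfully; keep a, b and c ... */
	return 0;

	/* Unwind in reverse order, falling through each label. */
 fail_2:
	free(b);
 fail_1:
	free(a);
 fail_0:
	return -1;
}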
Example #11
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.  We get the ethernet address here.
 */
void
deattach(struct device *parent, struct device *self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct de_softc *sc = (struct de_softc *)self;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t myaddr[ETHER_ADDR_LEN];
	int csr1, error;
	char *c;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * What kind of a board is this?
	 * The error bits 4-6 in pcsr1 are a device id as long as
	 * the high byte is zero.
	 */
	csr1 = DE_RCSR(DE_PCSR1);
	if (csr1 & 0xff60)
		c = "broken";
	else if (csr1 & 0x10)
		c = "delua";
	else
		c = "deuna";

	/*
	 * Reset the board and temporarily map
	 * the pcbb buffer onto the Unibus.
	 */
	DE_WCSR(DE_PCSR0, 0);		/* reset INTE */
	DELAY(100);
	DE_WCSR(DE_PCSR0, PCSR0_RSET);
	dewait(sc, "reset");

	sc->sc_ui.ui_size = sizeof(struct de_cdata);
	if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) {
		printf(": failed ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_dedata = (struct de_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Tell the DEUNA about our PCB
	 */
	DE_WCSR(DE_PCSR2, LOWORD(sc->sc_ui.ui_baddr));
	DE_WCSR(DE_PCSR3, HIWORD(sc->sc_ui.ui_baddr));
	DE_WLOW(CMD_GETPCBB);
	dewait(sc, "pcbb");

	sc->sc_dedata->dc_pcbb.pcbb0 = FC_RDPHYAD;
	DE_WLOW(CMD_GETCMD);
	dewait(sc, "read addr ");

	bcopy((caddr_t)&sc->sc_dedata->dc_pcbb.pcbb2, myaddr, sizeof (myaddr));
	printf(": %s, address %s\n", c, ether_sprintf(myaddr));

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, deintr, sc, 
	    &sc->sc_intrcnt);
	uba_reset_establish(dereset, &sc->sc_dev);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    sc->sc_dev.dv_xname, "intr");

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI;
	ifp->if_ioctl = deioctl;
	ifp->if_start = destart;
	ifp->if_init = deinit;
	ifp->if_stop = destop;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, myaddr);
	ubmemfree((struct uba_softc *)parent, &sc->sc_ui);

	sc->sc_sh = shutdownhook_establish(deshutdown, sc);
}
Example #12
void mlme_recv_auth_btamp(struct ieee80211_node *ni,
                          u_int16_t algo, u_int16_t seq, u_int16_t status_code,
                          u_int8_t *challenge, u_int8_t challenge_length, wbuf_t wbuf)
{

    struct ieee80211vap           *vap = ni->ni_vap;
    struct ieee80211_mlme_priv    *mlme_priv = vap->iv_mlme_priv;
    struct ieee80211_frame        *wh;
    u_int16_t                     indication_status = IEEE80211_STATUS_SUCCESS, response_status = IEEE80211_STATUS_SUCCESS;
    bool                          send_auth_response = true, indicate = true;

    wh = (struct ieee80211_frame *) wbuf_header(wbuf);
    /* AP must be up and running */
    if (!mlme_priv->im_connection_up || ieee80211_vap_ready_is_clear(vap)) {
        return;
    }

    IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr2,
                       "recv auth frame with algorithm %d seq %d \n", algo, seq);

    do {

        /* Check node existence for the peer */
        if (ni == vap->iv_bss) {
            return;
        } else {
            ieee80211_ref_node(ni);
        }

        /* Validate algo */
        if (algo == IEEE80211_AUTH_ALG_OPEN) {
            if (mlme_priv->im_expected_auth_seq_number) {
                send_auth_response = false;
                indicate = false;
                if (seq == mlme_priv->im_expected_auth_seq_number) {
                    if (!OS_CANCEL_TIMER(&mlme_priv->im_timeout_timer)) {
                        IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s: Timed-out already\n", __func__);
                        break;
                    }

                    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s: mlme_auth_complete\n", __func__);

                    /* Request complete */
                    mlme_priv->im_request_type = MLME_REQ_NONE;

                    /* Authentication complete (success or failure) */
                    IEEE80211_DELIVER_EVENT_MLME_AUTH_COMPLETE(vap, status_code);
                    vap->iv_mlme_priv->im_expected_auth_seq_number = 0;
                } else {
                    break;
                }
            } else {
                if (seq != IEEE80211_AUTH_OPEN_REQUEST) {
                    response_status = IEEE80211_STATUS_SEQUENCE;
                    indication_status = IEEE80211_STATUS_SEQUENCE;
                    break;
                } else {
                    indicate = true;
                    send_auth_response = true;
                }
            }
        } else if (algo == IEEE80211_AUTH_ALG_SHARED) {
            response_status = IEEE80211_STATUS_ALG;
            indication_status = IEEE80211_STATUS_ALG;
            break;
        } else {
            IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH | IEEE80211_MSG_CRYPTO,
                              "[%s] auth: unsupported algorithm %d \n",ether_sprintf(wh->i_addr2),algo);
            vap->iv_stats.is_rx_auth_unsupported++;
            response_status = IEEE80211_STATUS_ALG;
            indication_status = IEEE80211_STATUS_ALG;
            break;
        }
    } while (FALSE);

    IEEE80211_DELIVER_EVENT_MLME_AUTH_INDICATION(vap, ni->ni_macaddr, indication_status);

    if (send_auth_response) {
        ieee80211_send_auth(ni, seq + 1, response_status, NULL, 0);
    }

    if (ni) {
        if (indication_status != IEEE80211_STATUS_SUCCESS ){
            /* auth is not success, remove the node from node table*/
            ieee80211_node_leave(ni);
        }
        /*
         * Release the reference created at the beginning of the case above,
         * either by alloc_node or ref_node.
         */
        ieee80211_free_node(ni);
    }
}
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
static void
niattach(device_t parent, device_t self, void *aux)
{
	struct bi_attach_args *ba = aux;
	struct ni_softc *sc = device_private(self);
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct ni_msg *msg;
	struct ni_ptdb *ptdb;
	void *va;
	int i, j, s, res;
	u_short type;

	sc->sc_dev = self;

	type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
	printf(": DEBN%c\n", type == BIDT_DEBNA ? 'A' : type == BIDT_DEBNT ?
	    'T' : 'K');
	sc->sc_iot = ba->ba_iot;
	sc->sc_ioh = ba->ba_ioh;
	sc->sc_dmat = ba->ba_dmat;

	bi_intr_establish(ba->ba_icookie, ba->ba_ivec,
		niintr, sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
		device_xname(self), "intr");

	ni_getpgs(sc, sizeof(struct ni_gvppqb), (void **)&sc->sc_gvppqb,
	    (paddr_t *)&sc->sc_pgvppqb);
	ni_getpgs(sc, sizeof(struct ni_fqb), (void **)&sc->sc_fqb, 0);
	ni_getpgs(sc, NBDESCS * sizeof(struct ni_bbd),
	    (void **)&sc->sc_bbd, 0);
	/*
	 * Zero the newly allocated memory.
	 */

	nipqb->np_veclvl = (ba->ba_ivec << 2) + 2;
	nipqb->np_node = ba->ba_intcpu;
	nipqb->np_vpqb = (u_int32_t)gvp;
#ifdef __vax__
	nipqb->np_spt = nipqb->np_gpt = mfpr(PR_SBR);
	nipqb->np_sptlen = nipqb->np_gptlen = mfpr(PR_SLR);
#else
#error Must fix support for non-vax.
#endif
	nipqb->np_bvplvl = 1;
	nipqb->np_vfqb = (u_int32_t)fqb;
	nipqb->np_vbdt = (u_int32_t)bbd;
	nipqb->np_nbdr = NBDESCS;

	/* Free queue block */
	nipqb->np_freeq = NQUEUES;
	fqb->nf_mlen = PKTHDR+MSGADD;
	fqb->nf_dlen = PKTHDR+TXADD;
	fqb->nf_rlen = PKTHDR+RXADD;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = nistart;
	ifp->if_ioctl = niioctl;
	ifp->if_watchdog = nitimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Start init sequence.
	 */

	/* Reset the node */
	NI_WREG(BIREG_VAXBICSR, NI_RREG(BIREG_VAXBICSR) | BICSR_NRST);
	DELAY(500000);
	i = 20;
	while ((NI_RREG(BIREG_VAXBICSR) & BICSR_BROKE) && --i)
		DELAY(500000);
	if (i == 0) {
		printf("%s: BROKE bit set after reset\n", device_xname(self));
		return;
	}

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_UNDEF, "not undefined state"))
		return;

	/* Clear owner bits */
	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);
	NI_WREG(NI_PCR, NI_RREG(NI_PCR) & ~PCR_OWN);

	/* kick off init */
	NI_WREG(NI_PCR, (u_int32_t)sc->sc_pgvppqb | PCR_INIT | PCR_OWN);
	while (NI_RREG(NI_PCR) & PCR_OWN)
		DELAY(100000);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_INITED, PSR_INITED, "failed initialize"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_OWN|PCR_ENABLE);
	WAITREG(NI_PCR, PCR_OWN);
	WAITREG(NI_PSR, PSR_OWN);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_ENABLED, "failed enable"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	/*
	 * The message queue packets must be located at the beginning
	 * of a page. A VAX page is 512 bytes, but pages are clustered
	 * in groups of 8. This knowledge is used here when allocating pages.
	 * !!! How should this be done on MIPS and Alpha??? !!!
	 */
#if NBPG < 4096
#error pagesize too small
#endif
	s = splvm();
	/* Set up message free queue */
	ni_getpgs(sc, NMSGBUF * 512, &va, 0);
	for (i = 0; i < NMSGBUF; i++) {
		msg = (void *)((char *)va + i * 512);
		res = INSQTI(msg, &fqb->nf_mforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Set up xmit queue */
	ni_getpgs(sc, NTXBUF * 512, &va, 0);
	for (i = 0; i < NTXBUF; i++) {
		struct ni_dg *data;

		data = (void *)((char *)va + i * 512);
		data->nd_status = 0;
		data->nd_len = TXADD;
		data->nd_ptdbidx = 1;
		data->nd_opcode = BVP_DGRAM;
		for (j = 0; j < NTXFRAGS; j++) {
			data->bufs[j]._offset = 0;
			data->bufs[j]._key = 1;
			bbd[i * NTXFRAGS + j].nb_key = 1;
			bbd[i * NTXFRAGS + j].nb_status = 0;
			data->bufs[j]._index = i * NTXFRAGS + j;
		}
		res = INSQTI(data, &fqb->nf_dforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* recv buffers */
	ni_getpgs(sc, NRXBUF * 512, &va, 0);
	for (i = 0; i < NRXBUF; i++) {
		struct ni_dg *data;
		int idx;

		data = (void *)((char *)va + i * 512);
		data->nd_len = RXADD;
		data->nd_opcode = BVP_DGRAMRX;
		data->nd_ptdbidx = 2;
		data->bufs[0]._key = 1;

		idx = NTXBUF * NTXFRAGS + i;
		if (ni_add_rxbuf(sc, data, idx))
			panic("niattach: ni_add_rxbuf: out of mbufs");

		res = INSQTI(data, &fqb->nf_rforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	splx(s);

	/* Set initial parameters */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_WPARAM;
	((struct ni_param *)&msg->nm_text[0])->np_flags = NP_PAD;

	endwait = retry = 0;
	res = INSQTI(msg, &gvp->nc_forw0);

retry:	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);
	i = 1000;
	while (endwait == 0 && --i)
		DELAY(10000);

	if (endwait == 0) {
		if (++retry < 3)
			goto retry;
		printf("%s: no response to set params\n", device_xname(self));
		return;
	}

	/* Clear counters */
	msg = REMQHI(&fqb->nf_mforw);
	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_RCCNTR;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Enable transmit logic */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = 18;
	msg->nm_opcode2 = NI_STPTDB;
	ptdb = (struct ni_ptdb *)&msg->nm_text[0];
	memset(ptdb, 0, sizeof(struct ni_ptdb));
	ptdb->np_index = 1;
	ptdb->np_fque = 1;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Wait for everything to finish */
	WAITREG(NI_PSR, PSR_OWN);

	printf("%s: hardware address %s\n", device_xname(self),
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	if (shutdownhook_establish(ni_shutdown, sc) == 0)
		aprint_error_dev(self, "WARNING: unable to establish shutdown hook\n");
}
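
niattach() carves fixed 512-byte message slots out of each page-sized allocation before queueing them. A compact user-space sketch of that carving loop; the slot count stands in for NMSGBUF:

#include <stdlib.h>
#include <stdio.h>

#define NSLOTS	8	/* stands in for NMSGBUF */
#define SLOTSZ	512	/* per-message slot size used above */

int
main(void)
{
	void *va;
	int i;

	/* one contiguous allocation, then fixed-size slots inside it */
	if ((va = malloc(NSLOTS * SLOTSZ)) == NULL)
		return 1;

	for (i = 0; i < NSLOTS; i++) {
		void *slot = (char *)va + i * SLOTSZ;
		printf("slot %d at offset %d\n", i, i * SLOTSZ);
		(void)slot;	/* would be queued with INSQTI() above */
	}
	free(va);
	return 0;
}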
/*
 * Transmit interrupt routine
 */
static void 
sonictxint(struct sn_softc *sc)
{
	struct mtd	*mtd;
	void		*txp;
	unsigned short	txp_status;
	int		mtd_hw;
	struct ifnet	*ifp = &sc->sc_if;

	mtd_hw = sc->mtd_hw;

	if (mtd_hw == sc->mtd_free)
		return;

	while (mtd_hw != sc->mtd_free) {
		mtd = &sc->mtda[mtd_hw];

		txp = mtd->mtd_txp;

		if (SRO(sc->bitmode, txp, TXP_STATUS) == 0) {
			break; /* it hasn't really gone yet */
		}

#ifdef SNDEBUG
		{
			struct ether_header *eh;

			eh = (struct ether_header *) mtd->mtd_buf;
			printf("%s: xmit status=0x%x len=%d type=0x%x from %s",
			    device_xname(sc->sc_dev),
			    SRO(sc->bitmode, txp, TXP_STATUS),
			    SRO(sc->bitmode, txp, TXP_PKTSIZE),
			    htons(eh->ether_type),
			    ether_sprintf(eh->ether_shost));
			printf(" (to %s)\n", ether_sprintf(eh->ether_dhost));
		}
#endif /* SNDEBUG */

		ifp->if_flags &= ~IFF_OACTIVE;

		if (mtd->mtd_mbuf != 0) {
			m_freem(mtd->mtd_mbuf);
			mtd->mtd_mbuf = 0;
		}
		if (++mtd_hw == NTDA) mtd_hw = 0;

		txp_status = SRO(sc->bitmode, txp, TXP_STATUS);

		ifp->if_collisions += (txp_status & TCR_EXC) ? 16 :
			((txp_status & TCR_NC) >> 12);

		if ((txp_status & TCR_PTX) == 0) {
			ifp->if_oerrors++;
			printf("%s: Tx packet status=0x%x\n",
			    device_xname(sc->sc_dev), txp_status);

			/* XXX - DG This looks bogus */
			if (mtd_hw != sc->mtd_free) {
				printf("resubmitting remaining packets\n");
				mtd = &sc->mtda[mtd_hw];
				NIC_PUT(sc, SNR_CTDA, LOWER(mtd->mtd_vtxp));
				NIC_PUT(sc, SNR_CR, CR_TXP);
				wbflush();
				break;
			}
		}
	}

	sc->mtd_hw = mtd_hw;
	return;
}
Example #15
void
cdcef_attach(struct device *parent, struct device *self, void *aux)
{
	struct cdcef_softc *sc = (struct cdcef_softc *)self;
	struct usbf_attach_arg *uaa = aux;
	struct usbf_device *dev = uaa->device;
	struct ifnet *ifp;
	usbf_status err;
	struct usb_cdc_union_descriptor udesc;
	int s;
	u_int16_t macaddr_hi;


	/* Set the device identification according to the function. */
	usbf_devinfo_setup(dev, UDCLASS_IN_INTERFACE, 0, 0, CDCEF_VENDOR_ID,
	    CDCEF_PRODUCT_ID, CDCEF_DEVICE_CODE, CDCEF_VENDOR_STRING,
	    CDCEF_PRODUCT_STRING, CDCEF_SERIAL_STRING);

	/* Fill in the fields needed by the parent device. */
	sc->sc_dev.methods = &cdcef_methods;

	/* timeout to start delayed transfers */
	timeout_set(&sc->start_to, cdcef_start_timeout, sc);

	/*
	 * Build descriptors according to the device class specification.
	 */
	err = usbf_add_config(dev, &sc->sc_config);
	if (err) {
		printf(": usbf_add_config failed\n");
		return;
	}
	err = usbf_add_interface(sc->sc_config, UICLASS_CDC,
	    UISUBCLASS_ETHERNET_NETWORKING_CONTROL_MODEL, 0, NULL,
	    &sc->sc_iface);
	if (err) {
		printf(": usbf_add_interface failed\n");
		return;
	}
	/* XXX don't use hard-coded endpoint values (64 and 16). */
	err = usbf_add_endpoint(sc->sc_iface, UE_DIR_IN | 2, UE_BULK,
	    64, 16, &sc->sc_ep_in) ||
	    usbf_add_endpoint(sc->sc_iface, UE_DIR_OUT | 1, UE_BULK,
	    64, 16, &sc->sc_ep_out);
	if (err) {
		printf(": usbf_add_endpoint failed\n");
		return;
	}

	/* Append a CDC union descriptor. */
	bzero(&udesc, sizeof udesc);
	udesc.bLength = sizeof udesc;
	udesc.bDescriptorType = UDESC_CS_INTERFACE;
	udesc.bDescriptorSubtype = UDESCSUB_CDC_UNION;
	udesc.bSlaveInterface[0] = usbf_interface_number(sc->sc_iface);
	err = usbf_add_config_desc(sc->sc_config,
	    (usb_descriptor_t *)&udesc, NULL);
	if (err) {
		printf(": usbf_add_config_desc failed\n");
		return;
	}

	/*
	 * Close the configuration and build permanent descriptors.
	 */
	err = usbf_end_config(sc->sc_config);
	if (err) {
		printf(": usbf_end_config failed\n");
		return;
	}

	/* Preallocate xfers and data buffers. */
	sc->sc_xfer_in = usbf_alloc_xfer(dev);
	sc->sc_xfer_out = usbf_alloc_xfer(dev);
	sc->sc_buffer_in = usbf_alloc_buffer(sc->sc_xfer_in,
	    CDCEF_BUFSZ);
	sc->sc_buffer_out = usbf_alloc_buffer(sc->sc_xfer_out,
	    CDCEF_BUFSZ);
	if (sc->sc_buffer_in == NULL || sc->sc_buffer_out == NULL) {
		printf(": usbf_alloc_buffer failed\n");
		return;
	}

	/* Open the bulk pipes. */
	err = usbf_open_pipe(sc->sc_iface,
	    usbf_endpoint_address(sc->sc_ep_out), &sc->sc_pipe_out) ||
	    usbf_open_pipe(sc->sc_iface,
	    usbf_endpoint_address(sc->sc_ep_in), &sc->sc_pipe_in);
	if (err) {
		printf(": usbf_open_pipe failed\n");
		return;
	}

	/* Get ready to receive packets. */
	usbf_setup_xfer(sc->sc_xfer_out, sc->sc_pipe_out, sc,
	    sc->sc_buffer_out, CDCEF_BUFSZ, USBD_SHORT_XFER_OK, 0, cdcef_rxeof);
	err = usbf_transfer(sc->sc_xfer_out);
	if (err && err != USBF_IN_PROGRESS) {
		printf(": usbf_transfer failed\n");
		return;
	}

	s = splnet();

	macaddr_hi = htons(0x2acb);
	bcopy(&macaddr_hi, &sc->sc_arpcom.ac_enaddr[0], sizeof(u_int16_t));
	bcopy(&ticks, &sc->sc_arpcom.ac_enaddr[2], sizeof(u_int32_t));
	sc->sc_arpcom.ac_enaddr[5] = (u_int8_t)(sc->sc_dev.bdev.dv_unit);

	printf(": address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	ifp = GET_IFP(sc);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cdcef_ioctl;
	ifp->if_start = cdcef_start;
	ifp->if_watchdog = cdcef_watchdog;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp);

	splx(s);
}
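
cdcef_attach() fabricates a station address from a fixed 16-bit prefix, the kernel tick counter and the device unit instead of reading one from hardware. A stand-alone sketch of that composition; the prefix and tick value are example assumptions:

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htons() */

int
main(void)
{
	uint8_t enaddr[6];
	uint16_t prefix = htons(0x2acb);	/* fixed 16-bit prefix, as above */
	uint32_t ticks = 123456;		/* stand-in for the kernel tick counter */
	uint8_t unit = 0;			/* device unit number */

	memcpy(&enaddr[0], &prefix, sizeof(prefix));	/* bytes 0-1 */
	memcpy(&enaddr[2], &ticks, sizeof(ticks));	/* bytes 2-5 */
	enaddr[5] = unit;	/* low byte replaced by the unit, as above */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	    enaddr[0], enaddr[1], enaddr[2], enaddr[3], enaddr[4], enaddr[5]);
	return 0;
}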
Example #16
static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       HPC_ENET_REGS_SIZE,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       HPC_ENET_DEVREGS_SIZE,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
			"= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
			"= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_txmap[i])) != 0) {
		    printf(": unable to create tx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_4;
	    }
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_rxmap[i])) != 0) {
		    printf(": unable to create rx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_5;
	    }
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d\n,"
			       " error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
					      self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
					   ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sq_trace, 0, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if (sc->sc_rxmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if (sc->sc_txmap[i] !=  NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
				      sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}
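
The 8003/80c03 probe above leans on a write-then-read-back trick: the offset written is write-only on one revision and aliased to a read-only collision counter (which reads zero after the reset just performed) on the other, so the pattern fails to read back on the newer chip. A minimal standalone sketch of the same idea, not from the original sources; reg_write8()/reg_read8() are hypothetical stand-ins for the bus_space accessors.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register file standing in for bus_space_{read,write}_1(). */
static uint8_t regs[256];
static void reg_write8(unsigned off, uint8_t v) { regs[off] = v; }
static uint8_t reg_read8(unsigned off) { return regs[off]; }

/*
 * Write a distinctive pattern and read it back.  If the pattern does not
 * survive, the offset is aliased to another (read-only) register and we
 * are looking at the newer chip revision; the driver above compares the
 * read-back against zero because the aliased collision counter reads as
 * zero right after reset.
 */
static int
probe_is_newer_revision(unsigned off)
{
	int newer;

	reg_write8(off, 0xa5);
	newer = (reg_read8(off) != 0xa5);
	reg_write8(off, 0x00);	/* leave the register in a sane state */
	return newer;
}

int
main(void)
{
	printf("chip: %s\n", probe_is_newer_revision(0x0c) ? "80c03" : "8003");
	return 0;
}
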
Exemple #17
void
egattach(struct device *parent, struct device *self, void *aux)
{
	struct eg_softc *sc = (void *)self;
	struct isa_attach_args *ia = aux;
	bus_space_tag_t bst = sc->sc_bst = ia->ia_iot;
	bus_space_handle_t bsh;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	
	if (bus_space_map(bst, ia->ia_iobase, EG_IO_PORTS, 0, &bsh)) {
		printf("%s: can't map i/o space\n", sc->sc_dev.dv_xname);
		return;
	}
	sc->sc_bsh = bsh;

	egstop(sc);

	sc->eg_pcb[0] = EG_CMD_GETEADDR; /* Get Station address */
	sc->eg_pcb[1] = 0;
	if (egwritePCB(sc) != 0) {
		DPRINTF(("write error\n"));
		return;
	}	
	if (egreadPCB(sc) != 0) {
		DPRINTF(("read error\n"));
		egprintpcb(sc);
		return;
	}

	/* check Get station address response */
	if (sc->eg_pcb[0] != EG_RSP_GETEADDR || sc->eg_pcb[1] != 0x06) { 
		DPRINTF(("parse error\n"));
		egprintpcb(sc);
		return;
	}
	bcopy(&sc->eg_pcb[2], sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	printf(": ROM v%d.%02d %dk address %s\n",
	    sc->eg_rom_major, sc->eg_rom_minor, sc->eg_ram,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->eg_pcb[0] = EG_CMD_SETEADDR; /* Set station address */
	if (egwritePCB(sc) != 0) {
		DPRINTF(("write error2\n"));
		return;
	}
	if (egreadPCB(sc) != 0) {
		DPRINTF(("read error2\n"));
		egprintpcb(sc);
		return;
	}
	if (sc->eg_pcb[0] != EG_RSP_SETEADDR || sc->eg_pcb[1] != 0x02 ||
	    sc->eg_pcb[2] != 0 || sc->eg_pcb[3] != 0) {
		DPRINTF(("parse error2\n"));
		egprintpcb(sc);
		return;
	}

	/* Initialize ifnet structure. */
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = egstart;
	ifp->if_ioctl = egioctl;
	ifp->if_watchdog = egwatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS;
	IFQ_SET_READY(&ifp->if_snd);
	
	/* Now we can attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);
	
	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE,
	    IPL_NET, egintr, sc, sc->sc_dev.dv_xname);
}
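
egattach() repeats a small command/response handshake twice: write a PCB carrying a command byte, read the PCB back, then check the response opcode and payload length before trusting the data. A sketch of just that validation step; the RSP_GETEADDR value here is invented for illustration, the real EG_RSP_* constants live in the driver headers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative values only; the real driver defines EG_RSP_GETEADDR etc. */
#define RSP_GETEADDR	0x62
#define ADDR_LEN	6

/*
 * Validate a PCB response: byte 0 is the response opcode, byte 1 the
 * payload length, and the payload follows.  Returns 0 on success.
 */
static int
check_pcb_response(const uint8_t *pcb, uint8_t want_op, uint8_t want_len)
{
	if (pcb[0] != want_op || pcb[1] != want_len)
		return -1;
	return 0;
}

int
main(void)
{
	uint8_t pcb[64] = { RSP_GETEADDR, ADDR_LEN,
	    0x00, 0xa0, 0x24, 0x12, 0x34, 0x56 };
	uint8_t enaddr[ADDR_LEN];

	if (check_pcb_response(pcb, RSP_GETEADDR, ADDR_LEN) != 0) {
		printf("parse error\n");
		return 1;
	}
	memcpy(enaddr, &pcb[2], ADDR_LEN);
	printf("station address %02x:%02x:%02x:%02x:%02x:%02x\n",
	    enaddr[0], enaddr[1], enaddr[2], enaddr[3], enaddr[4], enaddr[5]);
	return 0;
}
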
Exemple #18
static void
bce_attach(device_t parent, device_t self, void *aux)
{
	struct bce_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct bce_product *bp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char     *intrstr = NULL;
	uint32_t	command;
	pcireg_t	memtype, pmode;
	bus_addr_t	memaddr;
	bus_size_t	memsize;
	void		*kva;
	bus_dma_segment_t seg;
	int             error, i, pmreg, rseg;
	struct ifnet   *ifp;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->bce_dev = self;

	bp = bce_lookup(pa);
	KASSERT(bp != NULL);

	sc->bce_pa = *pa;

	/* BCM440x can only address 30 bits (1GB) */
	if (bus_dmatag_subregion(pa->pa_dmat, 0, (1 << 30),
	    &(sc->bce_dmatag), BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(self,
		    "WARNING: failed to restrict dma range,"
		    " falling back to parent bus dma range\n");
		sc->bce_dmatag = pa->pa_dmat;
	}

	 aprint_naive(": Ethernet controller\n");
	 aprint_normal(": %s\n", bp->bp_name);

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(self, "failed to enable memory mapping!\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
		    &sc->bce_bhandle, &memaddr, &memsize) == 0)
			break;
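		/* FALLTHROUGH */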
	default:
		aprint_error_dev(self, "unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, NULL)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error_dev(self,
			    "unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			aprint_normal_dev(self,
			    "waking up from power state D%d\n", pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc);

	if (sc->bce_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive and transmit rings cannot share the same 4k space;
	 * however, both are allocated at once here.
	 */
	/*
	 * XXX Using PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
	 * the alignment limitation above forces it.
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to alloc space for ring descriptors, error = %d\n",
		    error);
		return;
	}
	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to map DMA buffers, error = %d\n", error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->bce_ring_map))) {
		aprint_error_dev(self,
		    "unable to create ring DMA map, error = %d\n", error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) ((char *)kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->ethercom.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	ifp->if_stop = bce_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize our media structures and probe the MII. */

	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;

	sc->ethercom.ec_mii = &sc->bce_mii;
	ifmedia_init(&sc->bce_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG|MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);
	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_PHY) & 0x1f;
	/*
	 * Enable the activity LED.
	 * XXX This belongs in a PHY driver, but none handles it currently.
	 */
	bce_mii_write(sc->bce_dev, 1, 26,	 /* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 26) & 0x7fff);	 /* MAGIC */
	/* enable traffic meter led mode */
	bce_mii_write(sc->bce_dev, 1, 27,	 /* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	sc->enaddr[0] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET0);
	sc->enaddr[1] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET1);
	sc->enaddr[2] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET2);
	sc->enaddr[3] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET3);
	sc->enaddr[4] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET4);
	sc->enaddr[5] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET5);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->enaddr));
	ether_ifattach(ifp, sc->enaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
	callout_init(&sc->bce_timeout, 0);

	if (pmf_device_register(self, NULL, bce_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}
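
bce_attach() assembles the station address one byte at a time and then hands it to ether_sprintf() for the attach message, as almost every example in this collection does. A user-space sketch of an ether_sprintf()-style formatter, assuming only that the address is 6 bytes; like the kernel routine it returns a static buffer, so it is deliberately not reentrant.

#include <stdint.h>
#include <stdio.h>

#define ETHER_ADDR_LEN	6

/* Sketch of an ether_sprintf()-style helper: not reentrant by design. */
static const char *
mac_sprintf(const uint8_t ap[ETHER_ADDR_LEN])
{
	static char buf[3 * ETHER_ADDR_LEN];	/* "xx:" x 6, last ':' -> NUL */

	snprintf(buf, sizeof(buf), "%02x:%02x:%02x:%02x:%02x:%02x",
	    ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);
	return buf;
}

int
main(void)
{
	uint8_t enaddr[ETHER_ADDR_LEN] = { 0x00, 0x10, 0x18, 0x3a, 0x5b, 0x7c };

	printf("Ethernet address %s\n", mac_sprintf(enaddr));
	return 0;
}
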
static int
atheros_set_key(const char *ifname, void *priv, enum wpa_alg alg,
		const u8 *addr, int key_idx, int set_tx, const u8 *seq,
		size_t seq_len, const u8 *key, size_t key_len)
{
	struct atheros_driver_data *drv = priv;
	struct ieee80211req_key wk;
	u_int8_t cipher;
	int ret;

	if (alg == WPA_ALG_NONE)
		return atheros_del_key(drv, addr, key_idx);

	wpa_printf(MSG_DEBUG, "%s: alg=%d addr=%s key_idx=%d",
		   __func__, alg, ether_sprintf(addr), key_idx);

	switch (alg) {
	case WPA_ALG_WEP:
		cipher = IEEE80211_CIPHER_WEP;
		break;
	case WPA_ALG_TKIP:
		cipher = IEEE80211_CIPHER_TKIP;
		break;
	case WPA_ALG_CCMP:
		cipher = IEEE80211_CIPHER_AES_CCM;
		break;
#ifdef CONFIG_IEEE80211W
	case WPA_ALG_IGTK:
		cipher = IEEE80211_CIPHER_AES_CMAC;
		break;
#endif /* CONFIG_IEEE80211W */
	default:
		printf("%s: unknown/unsupported algorithm %d\n",
			__func__, alg);
		return -1;
	}

	if (key_len > sizeof(wk.ik_keydata)) {
		printf("%s: key length %lu too big\n", __func__,
		       (unsigned long) key_len);
		return -3;
	}

	memset(&wk, 0, sizeof(wk));
	wk.ik_type = cipher;
	wk.ik_flags = IEEE80211_KEY_RECV | IEEE80211_KEY_XMIT;
	if (addr == NULL || is_broadcast_ether_addr(addr)) {
		memset(wk.ik_macaddr, 0xff, IEEE80211_ADDR_LEN);
		wk.ik_keyix = key_idx;
		if (set_tx)
			wk.ik_flags |= IEEE80211_KEY_DEFAULT;
	} else {
		memcpy(wk.ik_macaddr, addr, IEEE80211_ADDR_LEN);
		wk.ik_keyix = IEEE80211_KEYIX_NONE;
	}
	wk.ik_keylen = key_len;
	memcpy(wk.ik_keydata, key, key_len);

	ret = set80211priv(drv, IEEE80211_IOCTL_SETKEY, &wk, sizeof(wk));
	if (ret < 0) {
		wpa_printf(MSG_DEBUG, "%s: Failed to set key (addr %s"
			   " key_idx %d alg %d key_len %lu set_tx %d)",
			   __func__, ether_sprintf(wk.ik_macaddr), key_idx,
			   alg, (unsigned long) key_len, set_tx);
	}

	return ret;
}
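
The addressing logic in atheros_set_key() is the part worth remembering: a NULL or broadcast address means a group key installed at key_idx (optionally marked as the default transmit key), while a unicast address means a pairwise key whose slot is left to the driver (IEEE80211_KEYIX_NONE). A small standalone sketch of just that decision; the struct and the KEYIX_NONE value are reduced stand-ins, not the ieee80211req_key layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ADDR_LEN	6
#define KEYIX_NONE	0xff	/* stand-in for IEEE80211_KEYIX_NONE */

struct key_req {
	uint8_t	macaddr[ADDR_LEN];
	uint8_t	keyix;
	int	is_default_tx;
};

static int
is_broadcast(const uint8_t *addr)
{
	static const uint8_t bcast[ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(addr, bcast, ADDR_LEN) == 0;
}

/* Group keys are indexed and addressed to ff:ff:ff:ff:ff:ff; pairwise keys
 * carry the peer address and let the driver pick the key slot. */
static void
fill_key_addressing(struct key_req *k, const uint8_t *addr, int key_idx,
    int set_tx)
{
	if (addr == NULL || is_broadcast(addr)) {
		memset(k->macaddr, 0xff, ADDR_LEN);
		k->keyix = (uint8_t)key_idx;
		k->is_default_tx = set_tx;
	} else {
		memcpy(k->macaddr, addr, ADDR_LEN);
		k->keyix = KEYIX_NONE;
		k->is_default_tx = 0;
	}
}

int
main(void)
{
	struct key_req k;
	uint8_t peer[ADDR_LEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	fill_key_addressing(&k, NULL, 1, 1);
	printf("group key: keyix %u default_tx %d\n", k.keyix, k.is_default_tx);
	fill_key_addressing(&k, peer, 0, 1);
	printf("pairwise key: keyix 0x%02x default_tx %d\n",
	    k.keyix, k.is_default_tx);
	return 0;
}
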
Exemple #20
/*
 * Send a packet.  The ether header is already there.
 * Return the length sent (or -1 on error).
 */
int 
netif_put(struct iodesc *desc, void *pkt, size_t len)
{
	struct netif *nif;
	struct devdata *dd;
	struct saioreq *si;
	struct saif *sif;
	char *dmabuf;
	int rv, slen;

#ifdef NETIF_DEBUG
	if (debug > 1) {
		struct ether_header *eh;

		printf("netif_put: desc=0x%x pkt=0x%x len=%d\n",
			   desc, pkt, len);
		eh = pkt;
		printf("dst: %s ", ether_sprintf(eh->ether_dhost));
		printf("src: %s ", ether_sprintf(eh->ether_shost));
		printf("type: 0x%x\n", eh->ether_type & 0xFFFF);
	}
#endif

	nif = desc->io_netif;
	dd = nif->nif_devdata;
	si = &dd->dd_si;
	sif = si->si_sif;
	slen = len;

#ifdef PARANOID
	if (sif == NULL)
		panic("netif_put: no saif ptr");
#endif

	/*
	 * Copy into our transmit buffer because the PROM
	 * network driver might continue using the packet
	 * after the sif_xmit call returns.  We never send
	 * very much data anyway, so the copy is fine.
	 */
	if (slen > dd->tbuf_len)
		panic("netif_put: slen=%d", slen);
	memcpy(dd->tbuf, pkt, slen);

	if (slen < 60) {
		slen = 60;
	}

	rv = (*sif->sif_xmit)(si->si_devdata, dd->tbuf, slen);

#ifdef NETIF_DEBUG
	if (debug > 1)
		printf("netif_put: xmit returned %d\n", rv);
#endif
	/*
	 * Just ignore the return value.  If the PROM transmit
	 * function fails, it will make some noise, such as:
	 *      le: No Carrier
	 */

	return len;
}
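
netif_put() copies the packet into a private transmit buffer and pads short frames up to 60 bytes (the Ethernet minimum without FCS) before handing them to the PROM. A tiny sketch of that copy-and-pad step under assumed names; unlike the PROM path above it zeroes the pad bytes instead of transmitting whatever was left in the buffer.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define ETHER_MIN_NOFCS	60	/* minimum frame length, FCS excluded */

/*
 * Copy a packet into the transmit buffer and pad it to the Ethernet
 * minimum.  Returns the length to hand to the hardware, or -1 if the
 * packet (or the padded frame) does not fit.
 */
static int
copy_and_pad(char *tbuf, size_t tbuf_len, const void *pkt, size_t len)
{
	if (len > tbuf_len || ETHER_MIN_NOFCS > tbuf_len)
		return -1;
	memcpy(tbuf, pkt, len);
	if (len < ETHER_MIN_NOFCS) {
		memset(tbuf + len, 0, ETHER_MIN_NOFCS - len);
		len = ETHER_MIN_NOFCS;
	}
	return (int)len;
}

int
main(void)
{
	char tbuf[1536];
	const char pkt[] = "short test frame";

	printf("wire length %d\n",
	    copy_and_pad(tbuf, sizeof(tbuf), pkt, sizeof(pkt)));
	return 0;
}
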
Exemple #21
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}
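
The long switch on PCI_PRODUCT() above maps each nForce/CK804/MCP variant to a set of capability flags. The same mapping is sometimes expressed as a table, which keeps the per-device data in one place; a hedged sketch of that alternative with invented product IDs (the real PCI_PRODUCT_NVIDIA_* values come from pcidevs), not a claim about how this driver is organised.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Capability bits, mirroring the NFE_* flags used above. */
#define F_JUMBO		0x01
#define F_40BIT		0x02
#define F_CSUM		0x04
#define F_VLAN		0x08

struct product_flags {
	uint16_t	product;	/* invented IDs, for illustration only */
	uint32_t	flags;
};

static const struct product_flags nfe_products[] = {
	{ 0x0066, F_JUMBO | F_CSUM },			/* nForce3-style */
	{ 0x0268, F_40BIT },				/* MCP51-style */
	{ 0x0057, F_JUMBO | F_40BIT | F_CSUM },		/* CK804-style */
	{ 0x0372, F_JUMBO | F_40BIT | F_CSUM | F_VLAN },/* MCP55-style */
};

static uint32_t
lookup_flags(uint16_t product)
{
	size_t i;

	for (i = 0; i < sizeof(nfe_products) / sizeof(nfe_products[0]); i++)
		if (nfe_products[i].product == product)
			return nfe_products[i].flags;
	return 0;	/* unknown device: no optional features */
}

int
main(void)
{
	printf("flags for 0x0372: 0x%x\n", lookup_flags(0x0372));
	return 0;
}
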
Exemple #22
/*
 * Receive a packet, including the ether header.
 * Return the total length received (or -1 on error).
 */
int 
netif_get(struct iodesc *desc, void *pkt, size_t maxlen, time_t timo)
{
	struct netif *nif;
	struct devdata *dd;
	struct saioreq *si;
	struct saif *sif;
	int tick0, tmo_ticks;
	int rlen = 0;

#ifdef NETIF_DEBUG
	if (debug > 1)
		printf("netif_get: pkt=0x%x, maxlen=%d, tmo=%d\n",
			   pkt, maxlen, timo);
#endif

	nif = desc->io_netif;
	dd = nif->nif_devdata;
	si = &dd->dd_si;
	sif = si->si_sif;

	tmo_ticks = timo * hz;

	/* Have to receive into our own buffer and copy. */
	do {
		tick0 = getticks();
		do {
			rlen = (*sif->sif_poll)(si->si_devdata, dd->rbuf);
			if (rlen != 0)
				goto break2;
		} while (getticks() == tick0);
	} while (--tmo_ticks > 0);

#if 0
	/* No packet arrived.  Better reset the interface. */
	printf("netif_get: timeout; resetting\n");
	(*sif->sif_reset)(si->si_devdata, si);
#endif

break2:

#ifdef NETIF_DEBUG
	if (debug > 1)
		printf("netif_get: received rlen=%d\n", rlen);
#endif

	/* Need at least a valid Ethernet header. */
	if (rlen < 12)
		return -1;

	/* If we went beyond our buffer, we're dead! */
	if (rlen > dd->rbuf_len)
		panic("netif_get: rlen=%d", rlen);

	/* The caller's buffer may be smaller... */
	if (rlen > maxlen)
		rlen = maxlen;

	memcpy(pkt, dd->rbuf, rlen);

#ifdef NETIF_DEBUG
	if (debug > 1) {
		struct ether_header *eh = pkt;

		printf("dst: %s ", ether_sprintf(eh->ether_dhost));
		printf("src: %s ", ether_sprintf(eh->ether_shost));
		printf("type: 0x%x\n", eh->ether_type & 0xFFFF);
	}
#endif

	return rlen;
}
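
The receive loop in netif_get() shows the usual standalone-bootstrap timeout idiom: poll as fast as possible, but count whole clock ticks so that the caller's timeout is honoured no matter how fast the inner loop spins. A compact sketch of the same structure; getticks(), poll_once() and TICK_HZ are assumptions standing in for the PROM tick counter, the sif_poll hook and hz.

#include <stdio.h>
#include <time.h>

#define TICK_HZ	100	/* ticks per second, an assumption for the sketch */

/* Stand-in for the PROM's tick counter. */
static int
getticks(void)
{
	return (int)(clock() / (CLOCKS_PER_SEC / TICK_HZ));
}

/* Stand-in for sif_poll(): returns a packet length, or 0 if none ready. */
static int
poll_once(void)
{
	return 0;	/* nothing ever arrives in this sketch */
}

/* Poll until data arrives or `timo' seconds worth of ticks have passed. */
static int
poll_with_timeout(int timo)
{
	int tmo_ticks = timo * TICK_HZ;
	int tick0, rlen;

	do {
		tick0 = getticks();
		do {
			rlen = poll_once();
			if (rlen != 0)
				return rlen;
		} while (getticks() == tick0);
	} while (--tmo_ticks > 0);

	return -1;	/* timed out */
}

int
main(void)
{
	printf("poll result: %d\n", poll_with_timeout(1));
	return 0;
}
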
Exemple #23
static void
ath_rate_update(struct ath_softc *sc, struct ieee80211_node *ni, int rate)
{
	struct ath_node *an = ATH_NODE(ni);
	struct onoe_node *on = ATH_NODE_ONOE(an);
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	u_int8_t rix;

	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	DPRINTF(sc, "%s: set xmit rate for %s to %dM\n",
	    __func__, ether_sprintf(ni->ni_macaddr),
	    ni->ni_rates.rs_nrates > 0 ?
		(ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL) / 2 : 0);

	ni->ni_txrate = rate;
	/*
	 * Before association a node has no rate set configured,
	 * so we can't calculate any transmit codes to use.
	 * This is ok since we should never be sending anything
	 * but management frames, and those always go at the
	 * lowest hardware rate.
	 */
	if (ni->ni_rates.rs_nrates == 0)
		goto done;
	on->on_tx_rix0 = sc->sc_rixmap[
		ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL];
	on->on_tx_rate0 = rt->info[on->on_tx_rix0].rateCode;
	
	on->on_tx_rate0sp = on->on_tx_rate0 |
		rt->info[on->on_tx_rix0].shortPreamble;
	if (sc->sc_mrretry) {
		/*
		 * Hardware supports multi-rate retry; setup two
		 * step-down retry rates and make the lowest rate
		 * be the ``last chance''.  We use 4, 2, 2, 2 tries
		 * respectively (4 is set here, the rest are fixed
		 * in the xmit routine).
		 */
		on->on_tx_try0 = 1 + 3;		/* 4 tries at rate 0 */
		if (--rate >= 0) {
			rix = sc->sc_rixmap[
				ni->ni_rates.rs_rates[rate]&IEEE80211_RATE_VAL];
			on->on_tx_rate1 = rt->info[rix].rateCode;
			on->on_tx_rate1sp = on->on_tx_rate1 |
				rt->info[rix].shortPreamble;
		} else {
			on->on_tx_rate1 = on->on_tx_rate1sp = 0;
		}
		if (--rate >= 0) {
			rix = sc->sc_rixmap[
				ni->ni_rates.rs_rates[rate]&IEEE80211_RATE_VAL];
			on->on_tx_rate2 = rt->info[rix].rateCode;
			on->on_tx_rate2sp = on->on_tx_rate2 |
				rt->info[rix].shortPreamble;
		} else {
			on->on_tx_rate2 = on->on_tx_rate2sp = 0;
		}
		if (rate > 0) {
			/* NB: only do this if we didn't already do it above */
			on->on_tx_rate3 = rt->info[0].rateCode;
			on->on_tx_rate3sp =
				on->on_tx_rate3 | rt->info[0].shortPreamble;
		} else {
			on->on_tx_rate3 = on->on_tx_rate3sp = 0;
		}
	} else {
		on->on_tx_try0 = ATH_TXMAXTRY;	/* max tries at rate 0 */
		on->on_tx_rate1 = on->on_tx_rate1sp = 0;
		on->on_tx_rate2 = on->on_tx_rate2sp = 0;
		on->on_tx_rate3 = on->on_tx_rate3sp = 0;
	}
done:
	on->on_tx_ok = on->on_tx_err = on->on_tx_retr = on->on_tx_upper = 0;
}
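
The multi-rate retry setup above builds a ladder of up to four transmit rates: the chosen rate, two step-downs, and the lowest rate as a last chance, with 4/2/2/2 tries. A small sketch that computes such a ladder from a rate-table index, leaving out the hardware rate-code translation entirely.

#include <stdio.h>

#define MAXTRY_SERIES	4

struct retry_series {
	int rate[MAXTRY_SERIES];	/* rate-table indices, -1 = unused */
	int tries[MAXTRY_SERIES];
};

/*
 * Build a step-down ladder starting at `rix': rix, rix-1, rix-2, then the
 * lowest rate (index 0) as the last chance, unless the step-downs already
 * reached it.
 */
static void
build_retry_series(int rix, struct retry_series *s)
{
	int i, r = rix;

	for (i = 0; i < 3; i++) {
		s->rate[i] = (r >= 0) ? r : -1;
		s->tries[i] = (i == 0) ? 4 : 2;
		r--;
	}
	/* Only append index 0 if the ladder has not already reached it. */
	s->rate[3] = (r >= 0) ? 0 : -1;
	s->tries[3] = (s->rate[3] >= 0) ? 2 : 0;
}

int
main(void)
{
	struct retry_series s;
	int i;

	build_retry_series(5, &s);
	for (i = 0; i < MAXTRY_SERIES; i++)
		if (s.rate[i] >= 0)
			printf("series %d: rate index %d, %d tries\n",
			    i, s.rate[i], s.tries[i]);
	return 0;
}
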
Exemple #24
static boolean_t
xnbo_open_mac(xnb_t *xnbp, char *mac)
{
	xnbo_t *xnbop = xnbp->xnb_flavour_data;
	int err;
	const mac_info_t *mi;
	void (*rx_fn)(void *, mac_resource_handle_t, mblk_t *, boolean_t);
	struct ether_addr ea;
	uint_t max_sdu;
	mac_diag_t diag;

	if ((err = mac_open_by_linkname(mac, &xnbop->o_mh)) != 0) {
		cmn_err(CE_WARN, "xnbo_open_mac: "
		    "cannot open mac for link %s (%d)", mac, err);
		return (B_FALSE);
	}
	ASSERT(xnbop->o_mh != NULL);

	mi = mac_info(xnbop->o_mh);
	ASSERT(mi != NULL);

	if (mi->mi_media != DL_ETHER) {
		cmn_err(CE_WARN, "xnbo_open_mac: "
		    "device is not DL_ETHER (%d)", mi->mi_media);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}
	if (mi->mi_media != mi->mi_nativemedia) {
		cmn_err(CE_WARN, "xnbo_open_mac: "
		    "device media and native media mismatch (%d != %d)",
		    mi->mi_media, mi->mi_nativemedia);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}

	mac_sdu_get(xnbop->o_mh, NULL, &max_sdu);
	if (max_sdu > XNBMAXPKT) {
		cmn_err(CE_WARN, "xnbo_open_mac: mac device SDU too big (%d)",
		    max_sdu);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}

	/*
	 * MAC_OPEN_FLAGS_MULTI_PRIMARY is relevant when we are migrating a
	 * guest on the localhost itself. In this case we would have the MAC
	 * client open for the guest being migrated *and* also for the
	 * migrated guest (i.e. the former will be active till the migration
	 * is complete when the latter will be activated). This flag states
	 * that it is OK for mac_unicast_add to add the primary MAC unicast
	 * address multiple times.
	 */
	if ((err = mac_client_open(xnbop->o_mh, &xnbop->o_mch, NULL,
	    MAC_OPEN_FLAGS_USE_DATALINK_NAME |
	    MAC_OPEN_FLAGS_MULTI_PRIMARY)) != 0) {
		cmn_err(CE_WARN, "xnbo_open_mac: "
		    "error (%d) opening mac client", err);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}

	if (xnbop->o_need_rx_filter)
		rx_fn = xnbo_from_mac_filter;
	else
		rx_fn = xnbo_from_mac;

	err = mac_unicast_add_set_rx(xnbop->o_mch, NULL, MAC_UNICAST_PRIMARY,
	    &xnbop->o_mah, 0, &diag, xnbop->o_multicast_control ? rx_fn : NULL,
	    xnbp);
	if (err != 0) {
		cmn_err(CE_WARN, "xnbo_open_mac: failed to get the primary "
		    "MAC address of %s: %d", mac, err);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}
	if (!xnbop->o_multicast_control) {
		err = mac_promisc_add(xnbop->o_mch, MAC_CLIENT_PROMISC_ALL,
		    rx_fn, xnbp, &xnbop->o_mphp, MAC_PROMISC_FLAGS_NO_TX_LOOP |
		    MAC_PROMISC_FLAGS_VLAN_TAG_STRIP);
		if (err != 0) {
			cmn_err(CE_WARN, "xnbo_open_mac: "
			    "cannot enable promiscuous mode of %s: %d",
			    mac, err);
			i_xnbo_close_mac(xnbp, B_TRUE);
			return (B_FALSE);
		}
		xnbop->o_promiscuous = B_TRUE;
	}

	if (xnbop->o_need_setphysaddr) {
		err = mac_unicast_primary_set(xnbop->o_mh, xnbp->xnb_mac_addr);
		/* Warn, but continue on. */
		if (err != 0) {
			bcopy(xnbp->xnb_mac_addr, ea.ether_addr_octet,
			    ETHERADDRL);
			cmn_err(CE_WARN, "xnbo_open_mac: "
			    "cannot set MAC address of %s to "
			    "%s: %d", mac, ether_sprintf(&ea), err);
		}
	}

	if (!mac_capab_get(xnbop->o_mh, MAC_CAPAB_HCKSUM,
	    &xnbop->o_hcksum_capab))
		xnbop->o_hcksum_capab = 0;

	xnbop->o_running = B_TRUE;

	return (B_TRUE);
}
Exemple #25
static void
ath_hal_dumpkeycache(FILE *fd, int nkeys)
{
	static const char *keytypenames[] = {
		"WEP-40", 	/* AR_KEYTABLE_TYPE_40 */
		"WEP-104",	/* AR_KEYTABLE_TYPE_104 */
		"#2",
		"WEP-128",	/* AR_KEYTABLE_TYPE_128 */
		"TKIP",		/* AR_KEYTABLE_TYPE_TKIP */
		"AES-OCB",	/* AR_KEYTABLE_TYPE_AES */
		"AES-CCM",	/* AR_KEYTABLE_TYPE_CCM */
		"CLR",		/* AR_KEYTABLE_TYPE_CLR */
	};
	int micEnabled = SREV(state.revs.ah_macVersion, state.revs.ah_macRev) < SREV(4,8) ? 0 :
	       OS_REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_CRPT_MIC_ENABLE;
	u_int8_t mac[IEEE80211_ADDR_LEN];
	u_int8_t ismic[128/NBBY];
	int entry;
	int first = 1;

	memset(ismic, 0, sizeof(ismic));
	for (entry = 0; entry < nkeys; entry++) {
		u_int32_t macLo, macHi, type;
		u_int32_t key0, key1, key2, key3, key4;

		macHi = OS_REG_READ(ah, AR_KEYTABLE_MAC1(entry));
		if ((macHi & AR_KEYTABLE_VALID) == 0 && isclr(ismic, entry))
			continue;
		macLo = OS_REG_READ(ah, AR_KEYTABLE_MAC0(entry));
		macHi <<= 1;
		if (macLo & (1<<31))
			macHi |= 1;
		macLo <<= 1;
		mac[4] = macHi & 0xff;
		mac[5] = macHi >> 8;
		mac[0] = macLo & 0xff;
		mac[1] = macLo >> 8;
		mac[2] = macLo >> 16;
		mac[3] = macLo >> 24;
		type = OS_REG_READ(ah, AR_KEYTABLE_TYPE(entry));
		if ((type & 7) == AR_KEYTABLE_TYPE_TKIP && micEnabled)
			setbit(ismic, entry+64);
		key0 = OS_REG_READ(ah, AR_KEYTABLE_KEY0(entry));
		key1 = OS_REG_READ(ah, AR_KEYTABLE_KEY1(entry));
		key2 = OS_REG_READ(ah, AR_KEYTABLE_KEY2(entry));
		key3 = OS_REG_READ(ah, AR_KEYTABLE_KEY3(entry));
		key4 = OS_REG_READ(ah, AR_KEYTABLE_KEY4(entry));
		if (first) {
			fprintf(fd, "\n");
			first = 0;
		}
		fprintf(fd, "KEY[%03u] MAC %s %-7s %02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x\n"
			, entry
			, ether_sprintf(mac)
			, isset(ismic, entry) ? "MIC" : keytypenames[type & 7]
			, (key0 >>  0) & 0xff
			, (key0 >>  8) & 0xff
			, (key0 >> 16) & 0xff
			, (key0 >> 24) & 0xff
			, (key1 >>  0) & 0xff
			, (key1 >>  8) & 0xff
			, (key2 >>  0) & 0xff
			, (key2 >>  8) & 0xff
			, (key2 >> 16) & 0xff
			, (key2 >> 24) & 0xff
			, (key3 >>  0) & 0xff
			, (key3 >>  8) & 0xff
			, (key4 >>  0) & 0xff
			, (key4 >>  8) & 0xff
			, (key4 >> 16) & 0xff
			, (key4 >> 24) & 0xff
		);
	}
}
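
The keycache dump above rebuilds the peer MAC from the two AR_KEYTABLE_MAC words, which hold the address right-shifted by one bit split across the halves; the low (individual/group) bit is not recoverable and decodes as zero. A standalone sketch of that packing and unpacking so the round trip can be checked; the encoding follows the decode shown above, not a datasheet.

#include <stdint.h>
#include <stdio.h>

/*
 * The key cache stores the 48-bit MAC right-shifted by one bit across two
 * 32-bit words; only the low 15 bits of the high word carry address bits,
 * and bit 0 of mac[0] is dropped (it decodes as zero).
 */
static void
pack_mac(const uint8_t mac[6], uint32_t *lo, uint32_t *hi)
{
	uint32_t l, h;

	l = (uint32_t)mac[0] | ((uint32_t)mac[1] << 8) |
	    ((uint32_t)mac[2] << 16) | ((uint32_t)mac[3] << 24);
	h = (uint32_t)mac[4] | ((uint32_t)mac[5] << 8);
	*lo = (l >> 1) | (h << 31);	/* bit 0 of high half becomes bit 31 */
	*hi = h >> 1;
}

static void
unpack_mac(uint32_t lo, uint32_t hi, uint8_t mac[6])
{
	hi <<= 1;
	if (lo & (1u << 31))
		hi |= 1;
	lo <<= 1;
	mac[0] = lo & 0xff;
	mac[1] = (lo >> 8) & 0xff;
	mac[2] = (lo >> 16) & 0xff;
	mac[3] = (lo >> 24) & 0xff;
	mac[4] = hi & 0xff;
	mac[5] = (hi >> 8) & 0xff;
}

int
main(void)
{
	uint8_t in[6] = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x33 }, out[6];
	uint32_t lo, hi;
	int i;

	pack_mac(in, &lo, &hi);
	unpack_mac(lo, hi, out);
	for (i = 0; i < 6; i++)
		printf("%02x%s", out[i], i == 5 ? "\n" : ":");
	return 0;
}
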
Exemple #26
			printf(" %u-%u,%u", schan, schan + nchan-1,
			    cie->band[i].maxtxpwr);
		else
			printf(" %u,%u", schan, cie->band[i].maxtxpwr);
	}
	printf("]");
}

static void
dump_probe_beacon(uint8_t subtype, int isnew,
	const uint8_t mac[IEEE80211_ADDR_LEN],
	const struct ieee80211_scanparams *sp, int rssi)
{

	printf("[%s] %s%s on chan %u (bss chan %u) ",
	    ether_sprintf(mac), isnew ? "new " : "",
	    ieee80211_mgt_subtype_name[subtype >> IEEE80211_FC0_SUBTYPE_SHIFT],
	    sp->chan, sp->bchan);
	ieee80211_print_essid(sp->ssid + 2, sp->ssid[1]);
	printf(" rssi %d\n", rssi);

	if (isnew) {
		printf("[%s] caps 0x%x bintval %u erp 0x%x", 
			ether_sprintf(mac), sp->capinfo, sp->bintval, sp->erp);
		if (sp->country != NULL)
			dump_country(sp->country);
		printf("\n");
	}
}
#endif /* IEEE80211_DEBUG */
static void
xennet_xenbus_attach(device_t parent, device_t self, void *aux)
{
	struct xennet_xenbus_softc *sc = device_private(self);
	struct xenbusdev_attach_args *xa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int err;
	RING_IDX i;
	char *val, *e, *p;
	int s;
	extern int ifqmaxlen; /* XXX */
#ifdef XENNET_DEBUG
	char **dir;
	int dir_n = 0;
	char id_str[20];
#endif

	aprint_normal(": Xen Virtual Network Interface\n");
	sc->sc_dev = self;

#ifdef XENNET_DEBUG
	printf("path: %s\n", xa->xa_xbusd->xbusd_path);
	snprintf(id_str, sizeof(id_str), "%d", xa->xa_id);
	err = xenbus_directory(NULL, "device/vif", id_str, &dir_n, &dir);
	if (err) {
		aprint_error_dev(self, "xenbus_directory err %d\n", err);
	} else {
		printf("%s/\n", xa->xa_xbusd->xbusd_path);
		for (i = 0; i < dir_n; i++) {
			printf("\t/%s", dir[i]);
			err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, dir[i],
			    NULL, &val);
			if (err) {
				aprint_error_dev(self, "xenbus_read err %d\n", err);
			} else {
				printf(" = %s\n", val);
				free(val, M_DEVBUF);
			}
		}
	}
#endif /* XENNET_DEBUG */
	sc->sc_xbusd = xa->xa_xbusd;
	sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;

	/* initialize free TX and RX request lists */
	SLIST_INIT(&sc->sc_txreq_head);
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		sc->sc_txreqs[i].txreq_id = i;
		SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i],
		    txreq_next);
	}
	SLIST_INIT(&sc->sc_rxreq_head);
	s = splvm();
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
		rxreq->rxreq_id = i;
		rxreq->rxreq_sc = sc;
		rxreq->rxreq_va = uvm_km_alloc(kernel_map,
		    PAGE_SIZE, PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO);
		if (rxreq->rxreq_va == 0)
			break;
		if (!pmap_extract(pmap_kernel(), rxreq->rxreq_va,
		    &rxreq->rxreq_pa))
			panic("%s: no pa for mapped va ?", device_xname(self));
		rxreq->rxreq_gntref = GRANT_INVALID_REF;
		SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next);
	}
	splx(s);
	sc->sc_free_rxreql = i;
	if (sc->sc_free_rxreql == 0) {
		aprint_error_dev(self, "failed to allocate rx memory\n");
		return;
	}

	/* read mac address */
	err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, "mac", NULL, &val);
	if (err) {
		aprint_error_dev(self, "can't read mac address, err %d\n", err);
		return;
	}
	/* parse the colon-separated mac address string */
	for (i = 0, p = val; i < 6; i++) {
		sc->sc_enaddr[i] = strtoul(p, &e, 16);
		if (i != 5 && e[0] != ':') {
			aprint_error_dev(self, "%s is not a valid mac address\n", val);
			free(val, M_DEVBUF);
			return;
		}
		p = &e[1];
	}
	free(val, M_DEVBUF);
	aprint_normal_dev(self, "MAC address %s\n",
	    ether_sprintf(sc->sc_enaddr));
	/* Initialize ifnet structure and attach interface */
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = xennet_start;
	ifp->if_ioctl = xennet_ioctl;
	ifp->if_watchdog = xennet_watchdog;
	ifp->if_init = xennet_init;
	ifp->if_stop = xennet_stop;
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
	ifp->if_timer = 0;
	ifp->if_snd.ifq_maxlen = max(ifqmaxlen, NET_TX_RING_SIZE * 2);
	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	sc->sc_softintr = softint_establish(SOFTINT_NET, xennet_softstart, sc);
	if (sc->sc_softintr == NULL)
		panic("%s: can't establish soft interrupt",
			device_xname(self));

	/* initialise shared structures and tell backend that we are ready */
	xennet_xenbus_resume(sc);
}
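
The attach routine reads the backend's "mac" property as a text string and converts it with strtoul(), checking that each of the first five bytes is followed by a colon. A user-space sketch of that parser, returning -1 on malformed input; it is an illustration of the technique, not the kernel code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse "xx:xx:xx:xx:xx:xx" into 6 bytes; returns 0 on success, -1 on error. */
static int
parse_mac(const char *val, uint8_t enaddr[6])
{
	char *e;
	const char *p = val;
	int i;

	for (i = 0; i < 6; i++) {
		unsigned long byte = strtoul(p, &e, 16);

		if (e == p || byte > 0xff)
			return -1;		/* no digits, or out of range */
		if (i != 5 && *e != ':')
			return -1;		/* missing separator */
		if (i == 5 && *e != '\0')
			return -1;		/* trailing junk */
		enaddr[i] = (uint8_t)byte;
		p = e + 1;
	}
	return 0;
}

int
main(void)
{
	uint8_t enaddr[6];

	if (parse_mac("00:16:3e:12:34:56", enaddr) == 0)
		printf("MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		    enaddr[0], enaddr[1], enaddr[2], enaddr[3],
		    enaddr[4], enaddr[5]);
	return 0;
}
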
Exemple #28
/*
 * Join an infrastructure network
 */
int
ieee80211_sta_join(struct ieee80211vap *vap, ieee80211_scan_entry_t scan_entry)
{
    struct ieee80211com *ic = vap->iv_ic;
    struct ieee80211_node_table *nt = &ic->ic_sta;
    struct ieee80211_node *ni = NULL;
    const u_int8_t *macaddr = ieee80211_scan_entry_macaddr(scan_entry);
    int error = 0;

    ASSERT(vap->iv_opmode == IEEE80211_M_STA);
    
    ni = ieee80211_find_node(nt, macaddr);
    if (ni) {
        /*
         * Reusing the old node is a potential source of bugs: it may
         * carry stale state from a previous association.  Get rid of
         * the old bss node and create a new one.
         */
        ieee80211_sta_leave(ni); 
        ieee80211_free_node(ni); 
    }
    /*
     * Create a BSS node.
     */
    ni = ieee80211_alloc_node(nt, vap, macaddr);
    if (ni == NULL)
        return -ENOMEM;
    /* set the maximum number of frames to be queued when the vap is in fake sleep */
    ieee80211_node_saveq_set_param(ni,IEEE80211_NODE_SAVEQ_DATA_Q_LEN,IEE80211_STA_MAX_NODE_SAVEQ_LEN);
    /* To become a bss node, a node need an extra reference count, which alloc node already gives */
#ifdef IEEE80211_DEBUG_REFCNT
    ieee80211_note(ni->ni_vap,"%s ,line %u: increase node %p <%s> refcnt to %d\n",
                   __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr),
                   ieee80211_node_refcnt(ni));
#endif

    /* setup the bss node for association */
    error = ieee80211_setup_node(ni, scan_entry);
    if (error != 0) {
        ieee80211_free_node(ni);
        return error;
    }

    /* copy the beacon timestamp */
    OS_MEMCPY(ni->ni_tstamp.data,
              ieee80211_scan_entry_tsf(scan_entry),
              sizeof(ni->ni_tstamp));

    /*
     * Join the BSS represented by this new node.
     * This function will free up the old BSS node
     * and use this one as the new BSS node.
     */
    ieee80211_sta_join_bss(ni);

    IEEE80211_ADD_NODE_TARGET(ni, ni->ni_vap, 0);

    /* Save our home channel */
    vap->iv_bsschan = ni->ni_chan;
    vap->iv_cur_mode = ieee80211_chan2mode(ni->ni_chan);

    /* Update the DotH flag */
    ieee80211_update_spectrumrequirement(vap);

    /*
     *  The OS will control our security keys.  
     *  If clear, keys will be cleared.
     *  If static WEP, keys will be plumbed before JoinInfra.
     *  If WPA/WPA2, ciphers will be setup, but no keys will be plumbed until 
     *    after they are negotiated.
     *  XXX We should ASSERT that all of the foregoing is true.
     */
    return 0;
}
Exemple #29
/*
 * IEEE80211_M_IBSS+IEEE80211_M_AHDEMO vap state machine handler.
 */
static int
adhoc_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni;
	enum ieee80211_state ostate;

	IEEE80211_LOCK_ASSERT(vap->iv_ic);

	ostate = vap->iv_state;
	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n",
	    __func__, ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate], arg);
	vap->iv_state = nstate;			/* state transition */
	if (ostate != IEEE80211_S_SCAN)
		ieee80211_cancel_scan(vap);	/* background scan */
	ni = vap->iv_bss;			/* NB: no reference held */
	switch (nstate) {
	case IEEE80211_S_INIT:
		switch (ostate) {
		case IEEE80211_S_SCAN:
			ieee80211_cancel_scan(vap);
			break;
		default:
			break;
		}
		if (ostate != IEEE80211_S_INIT) {
			/* NB: optimize INIT -> INIT case */
			ieee80211_reset_bss(vap);
		}
		break;
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_RUN:		/* beacon miss */
			/* purge station table; entries are stale */
			ieee80211_iterate_nodes(&ic->ic_sta, sta_leave, vap);
			/* fall thru... */
		case IEEE80211_S_INIT:
			if (vap->iv_des_chan != IEEE80211_CHAN_ANYC &&
			    !IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) {
				/*
				 * Already have a channel; bypass the
				 * scan and startup immediately.
				 */
				ieee80211_create_ibss(vap,
				    ieee80211_ht_adjust_channel(ic,
				    vap->iv_des_chan, vap->iv_flags_ht));
				break;
			}
			/*
			 * Initiate a scan.  We can come here as a result
			 * of an IEEE80211_IOC_SCAN_REQ too in which case
			 * the vap will be marked with IEEE80211_FEXT_SCANREQ
			 * and the scan request parameters will be present
			 * in iv_scanreq.  Otherwise we do the default.
			 */
			if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) {
				ieee80211_check_scan(vap,
				    vap->iv_scanreq_flags,
				    vap->iv_scanreq_duration,
				    vap->iv_scanreq_mindwell,
				    vap->iv_scanreq_maxdwell,
				    vap->iv_scanreq_nssid, vap->iv_scanreq_ssid);
				vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ;
			} else
				ieee80211_check_scan_current(vap);
			break;
		case IEEE80211_S_SCAN:
			/*
			 * This can happen because of a change in state
			 * that requires a reset.  Trigger a new scan
			 * unless we're in manual roaming mode in which
			 * case an application must issue an explicit request.
			 */
			if (vap->iv_roaming == IEEE80211_ROAMING_AUTO)
				ieee80211_check_scan_current(vap);
			break;
		default:
			goto invalid;
		}
		break;
	case IEEE80211_S_RUN:
		if (vap->iv_flags & IEEE80211_F_WPA) {
			/* XXX validate prerequisites */
		}
		switch (ostate) {
		case IEEE80211_S_SCAN:
#ifdef IEEE80211_DEBUG
			if (ieee80211_msg_debug(vap)) {
				ieee80211_note(vap,
				    "synchronized with %s ssid ",
				    ether_sprintf(ni->ni_bssid));
				ieee80211_print_essid(vap->iv_bss->ni_essid,
				    ni->ni_esslen);
				/* XXX MCS/HT */
				printf(" channel %d start %uMb\n",
				    ieee80211_chan2ieee(ic, ic->ic_curchan),
				    IEEE80211_RATE2MBS(ni->ni_txrate));
			}
#endif
			break;
		case IEEE80211_S_RUN:	/* IBSS merge */
			break;
		default:
			goto invalid;
		}
		/*
		 * When 802.1x is not in use mark the port authorized
		 * at this point so traffic can flow.
		 */
		if (ni->ni_authmode != IEEE80211_AUTH_8021X)
			ieee80211_node_authorize(ni);
		/*
		 * Fake association when joining an existing bss.
		 */
		if (!IEEE80211_ADDR_EQ(ni->ni_macaddr, vap->iv_myaddr) &&
		    ic->ic_newassoc != NULL)
			ic->ic_newassoc(ni, ostate != IEEE80211_S_RUN);
		break;
	case IEEE80211_S_SLEEP:
		vap->iv_sta_ps(vap, 0);
		break;
	default:
	invalid:
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
		    "%s: unexpected state transition %s -> %s\n", __func__,
		    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
		break;
	}
	return 0;
}
Exemple #30
        KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
        memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
               ETHER_ADDR_LEN);
    } else
        /* Check if there is already a MAC address in the register */
        if ((smsc_read_reg(sc, SMSC_MAC_ADDRL, &mac_l) == 0) &&
                (smsc_read_reg(sc, SMSC_MAC_ADDRH, &mac_h) == 0)) {
            sc->sc_enaddr[5] = (uint8_t)((mac_h >> 8) & 0xff);
            sc->sc_enaddr[4] = (uint8_t)((mac_h) & 0xff);
            sc->sc_enaddr[3] = (uint8_t)((mac_l >> 24) & 0xff);
            sc->sc_enaddr[2] = (uint8_t)((mac_l >> 16) & 0xff);
            sc->sc_enaddr[1] = (uint8_t)((mac_l >> 8) & 0xff);
            sc->sc_enaddr[0] = (uint8_t)((mac_l) & 0xff);
        }

    aprint_normal_dev(self, " Ethernet address %s\n", ether_sprintf(sc->sc_enaddr));

    IFQ_SET_READY(&ifp->if_snd);

    /* Initialize MII/media info. */
    mii = &sc->sc_mii;
    mii->mii_ifp = ifp;
    mii->mii_readreg = smsc_miibus_readreg;
    mii->mii_writereg = smsc_miibus_writereg;
    mii->mii_statchg = smsc_miibus_statchg;
    mii->mii_flags = MIIF_AUTOTSLEEP;
    sc->sc_ec.ec_mii = mii;
    ifmedia_init(&mii->mii_media, 0, smsc_ifmedia_upd, smsc_ifmedia_sts);
    mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

    if (LIST_FIRST(&mii->mii_phys) == NULL) {