Example #1
0
/*
 * Prepare a vap for use.  Drivers use this call to
 * set up net80211 state in new vaps prior to attaching
 * them with ieee80211_vap_attach (below).
 */
int
ieee80211_vap_setup(struct ieee80211com *ic, struct ieee80211vap *vap,
    const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode,
    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		if_printf(ic->ic_ifp, "%s: unable to allocate ifnet\n",
		    __func__);
		return ENOMEM;
	}
	if_initname(ifp, name, unit);
	ifp->if_softc = vap;			/* back pointer */
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ieee80211_start;
	ifp->if_ioctl = ieee80211_ioctl;
	ifp->if_init = ieee80211_init;
	/* NB: input+output filled in by ether_ifattach */
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	vap->iv_ifp = ifp;
	vap->iv_ic = ic;
	vap->iv_flags = ic->ic_flags;		/* propagate common flags */
	vap->iv_flags_ext = ic->ic_flags_ext;
	vap->iv_flags_ven = ic->ic_flags_ven;
	vap->iv_caps = ic->ic_caps &~ IEEE80211_C_OPMODE;
	vap->iv_htcaps = ic->ic_htcaps;
	vap->iv_htextcaps = ic->ic_htextcaps;
	vap->iv_opmode = opmode;
	vap->iv_caps |= ieee80211_opcap[opmode];
	switch (opmode) {
	case IEEE80211_M_WDS:
		/*
		 * WDS links must specify the bssid of the far end.
		 * For legacy operation this is a static relationship.
		 * For non-legacy operation the station must associate
		 * and be authorized to pass traffic.  Plumbing the
		 * vap to the proper node happens when the vap
		 * transitions to RUN state.
		 */
		IEEE80211_ADDR_COPY(vap->iv_des_bssid, bssid);
		vap->iv_flags |= IEEE80211_F_DESBSSID;
		if (flags & IEEE80211_CLONE_WDSLEGACY)
			vap->iv_flags_ext |= IEEE80211_FEXT_WDSLEGACY;
		break;
#ifdef IEEE80211_SUPPORT_TDMA
	case IEEE80211_M_AHDEMO:
		if (flags & IEEE80211_CLONE_TDMA) {
			/* NB: checked before clone operation allowed */
			KASSERT(ic->ic_caps & IEEE80211_C_TDMA,
			    ("not TDMA capable, ic_caps 0x%x", ic->ic_caps));
			/*
			 * Propagate TDMA capability to mark vap; this
			 * cannot be removed and is used to distinguish
			 * regular ahdemo operation from ahdemo+tdma.
			 */
			vap->iv_caps |= IEEE80211_C_TDMA;
		}
		break;
#endif
	default:
		break;
	}
	/* auto-enable s/w beacon miss support */
	if (flags & IEEE80211_CLONE_NOBEACONS)
		vap->iv_flags_ext |= IEEE80211_FEXT_SWBMISS;
	/* auto-generated or user supplied MAC address */
	if (flags & (IEEE80211_CLONE_BSSID|IEEE80211_CLONE_MACADDR))
		vap->iv_flags_ext |= IEEE80211_FEXT_UNIQMAC;
	/*
	 * Enable various functionality by default if we're
	 * capable; the driver can override us if it knows better.
	 */
	if (vap->iv_caps & IEEE80211_C_WME)
		vap->iv_flags |= IEEE80211_F_WME;
	if (vap->iv_caps & IEEE80211_C_BURST)
		vap->iv_flags |= IEEE80211_F_BURST;
	/* NB: bg scanning only makes sense for station mode right now */
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    (vap->iv_caps & IEEE80211_C_BGSCAN))
		vap->iv_flags |= IEEE80211_F_BGSCAN;
	vap->iv_flags |= IEEE80211_F_DOTH;	/* XXX no cap, just ena */
	/* NB: DFS support only makes sense for ap mode right now */
	if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
	    (vap->iv_caps & IEEE80211_C_DFS))
		vap->iv_flags_ext |= IEEE80211_FEXT_DFS;

	vap->iv_des_chan = IEEE80211_CHAN_ANYC;		/* any channel is ok */
	vap->iv_bmissthreshold = IEEE80211_HWBMISS_DEFAULT;
	vap->iv_dtim_period = IEEE80211_DTIM_DEFAULT;
	/*
	 * Install a default reset method for the ioctl support;
	 * the driver can override this.
	 */
	vap->iv_reset = default_reset;

	IEEE80211_ADDR_COPY(vap->iv_myaddr, macaddr);

	ieee80211_sysctl_vattach(vap);
	ieee80211_crypto_vattach(vap);
	ieee80211_node_vattach(vap);
	ieee80211_power_vattach(vap);
	ieee80211_proto_vattach(vap);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_superg_vattach(vap);
#endif
	ieee80211_ht_vattach(vap);
	ieee80211_scan_vattach(vap);
	ieee80211_regdomain_vattach(vap);
	ieee80211_radiotap_vattach(vap);
	ieee80211_ratectl_set(vap, IEEE80211_RATECTL_NONE);

	return 0;
}
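A minimal usage sketch (not from the original source): a driver's vap_create
method typically pairs ieee80211_vap_setup() with ieee80211_vap_attach()
roughly as below.  The foo_vap_create name is hypothetical, and the
three-argument ieee80211_vap_attach() of this net80211 vintage is assumed.

/*
 * Hypothetical driver vap_create method (sketch only): allocate the vap,
 * run the generic setup above, apply driver-specific overrides, then attach.
 */
static struct ieee80211vap *
foo_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct ieee80211vap *vap;

	vap = malloc(sizeof(struct ieee80211vap), M_80211_VAP,
	    M_WAITOK | M_ZERO);
	if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac) != 0) {
		free(vap, M_80211_VAP);
		return (NULL);
	}
	/* override iv_newstate, iv_reset, etc. here as needed */
	ieee80211_vap_attach(vap, ieee80211_media_change,
	    ieee80211_media_status);
	return (vap);
}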
Example #2
0
/**
 * Module/driver initialization.  Creates the network
 * devices.
 *
 * @return Zero on success
 */
int cvm_oct_init_module(device_t bus)
{
	device_t dev;
	int ifnum;
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw(bus);

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_prt_tagx_t pip_prt_tagx;
			int pkind = cvmx_helper_get_ipd_port(interface, port);

			pip_prt_tagx.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pkind));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(pkind), pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	cvm_oct_link_taskq = taskqueue_create("octe link", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_link_taskq);
	taskqueue_start_threads(&cvm_oct_link_taskq, 1, PI_NET,
	    "octe link taskq");

	/* Initialize the FAU used for counting packet buffers that need to be freed */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	ifnum = 0;
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     ifnum++, port++) {
			cvm_oct_private_t *priv;
			struct ifnet *ifp;
			
			dev = BUS_ADD_CHILD(bus, 0, "octe", ifnum);
			if (dev != NULL)
				ifp = if_alloc(IFT_ETHER);
			if (dev == NULL || ifp == NULL) {
				printf("Failed to allocate ethernet device for interface %d port %d\n", interface, port);
				continue;
			}

			/* Initialize the device private structure. */
			device_probe(dev);
			priv = device_get_softc(dev);
			priv->dev = dev;
			priv->ifp = ifp;
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++)
				cvmx_fau_atomic_write32(priv->fau+qos*4, 0);
			TASK_INIT(&priv->link_task, 0, cvm_oct_update_link, priv);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon NPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				priv->init = cvm_oct_xaui_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon LOOP Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->init = cvm_oct_sgmii_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				priv->init = cvm_oct_spi_init;
				priv->uninit = cvm_oct_spi_uninit;
				device_set_desc(dev, "Cavium Octeon SPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon RGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon GMII Ethernet");
				break;
			}

			ifp->if_softc = priv;

			if (!priv->init) {
				printf("octe%d: unsupported device type interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else if (priv->init(ifp) != 0) {
				printf("octe%d: failed to register device for interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else {
				cvm_oct_device[priv->port] = ifp;
				fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/* Set the POW timer rate to give an interrupt at most INTERRUPT_LIMIT times per second */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, cvmx_clock_get_rate(CVMX_CLOCK_CORE)/(INTERRUPT_LIMIT*16*256)<<8);

		/* Enable POW timer interrupt. It will count when there are packets available */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1ful<<24);
	} else {
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	callout_init(&cvm_oct_poll_timer, 1);
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);

	return 0;
}
Example #3
0
/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
	struct hv_device *device_ctx = vmbus_get_devctx(dev);
	netvsc_device_info device_info;
	hn_softc_t *sc;
	int unit = device_get_unit(dev);
	struct ifnet *ifp;
	int ret;

	netvsc_init();

	sc = device_get_softc(dev);
	if (sc == NULL) {
		return (ENOMEM);
	}

	bzero(sc, sizeof(hn_softc_t));
	sc->hn_unit = unit;
	sc->hn_dev = dev;

	NV_LOCK_INIT(sc, "NetVSCLock");

	sc->hn_dev_obj = device_ctx;

	ifp = sc->hn_ifp = sc->arpcom.ac_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = sc;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_dunit = unit;
	ifp->if_dname = NETVSC_DEVNAME;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hn_ioctl;
	ifp->if_start = hn_start;
	ifp->if_init = hn_ifinit;
	/* needed by hv_rf_on_device_add() code */
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, 512);
	ifp->if_snd.ifq_drv_maxlen = 511;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Tell upper layers that we support full VLAN capability.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

	ret = hv_rf_on_device_add(device_ctx, &device_info);
	if (ret != 0) {
		if_free(ifp);

		return (ret);
	}
	if (device_info.link_state == 0) {
		sc->hn_carrier = 1;
	}

	ether_ifattach(ifp, device_info.mac_addr);

	return (0);
}
Example #4
0
/*
 * Add a mif to the mif table
 */
static int
add_m6if(struct mif6ctl *mifcp)
{
	struct mif6 *mifp;
	struct ifnet *ifp;
	int error;

	MIF6_LOCK();

	if (mifcp->mif6c_mifi >= MAXMIFS) {
		MIF6_UNLOCK();
		return (EINVAL);
	}
	mifp = mif6table + mifcp->mif6c_mifi;
	if (mifp->m6_ifp != NULL) {
		MIF6_UNLOCK();
		return (EADDRINUSE); /* XXX: is it appropriate? */
	}
	if (mifcp->mif6c_pifi == 0 || mifcp->mif6c_pifi > V_if_index) {
		MIF6_UNLOCK();
		return (ENXIO);
	}

	ifp = ifnet_byindex(mifcp->mif6c_pifi);

	if (mifcp->mif6c_flags & MIFF_REGISTER) {
		if (reg_mif_num == (mifi_t)-1) {
			ifp = if_alloc(IFT_OTHER);

			if_initname(ifp, "register_mif", 0);
			ifp->if_flags |= IFF_LOOPBACK;
			if_attach(ifp);
			multicast_register_if6 = ifp;
			reg_mif_num = mifcp->mif6c_mifi;
			/*
			 * it is impossible to guess the ifindex of the
			 * register interface.  So mif6c_pifi is automatically
			 * calculated.
			 */
			mifcp->mif6c_pifi = ifp->if_index;
		} else {
			ifp = multicast_register_if6;
		}
	} else {
		/* Make sure the interface supports multicast */
		if ((ifp->if_flags & IFF_MULTICAST) == 0) {
			MIF6_UNLOCK();
			return (EOPNOTSUPP);
		}

		error = if_allmulti(ifp, 1);
		if (error) {
			MIF6_UNLOCK();
			return (error);
		}
	}

	mifp->m6_flags     = mifcp->mif6c_flags;
	mifp->m6_ifp       = ifp;

	/* initialize per mif pkt counters */
	mifp->m6_pkt_in    = 0;
	mifp->m6_pkt_out   = 0;
	mifp->m6_bytes_in  = 0;
	mifp->m6_bytes_out = 0;

	/* Adjust nummifs up if the mifi is higher than nummifs */
	if (nummifs <= mifcp->mif6c_mifi)
		nummifs = mifcp->mif6c_mifi + 1;

	MIF6_UNLOCK();
	MRT6_DLOG(DEBUG_ANY, "mif #%d, phyint %s", mifcp->mif6c_mifi,
	    if_name(ifp));

	return (0);
}
Example #5
0
/*
 * Install interface into kernel networking data structures
 */
int
ed_attach(device_t dev)
{
	struct ed_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;

	sc->dev = dev;
	ED_LOCK_INIT(sc);
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		ED_LOCK_DESTROY(sc);
		return (ENOSPC);
	}

	if (sc->readmem == NULL) {
		if (sc->mem_shared) {
			if (sc->isa16bit)
				sc->readmem = ed_shmem_readmem16;
			else
				sc->readmem = ed_shmem_readmem8;
		} else {
			sc->readmem = ed_pio_readmem;
		}
	}
	if (sc->sc_write_mbufs == NULL) {
		device_printf(dev, "No write mbufs routine set\n");
		if_free(ifp);
		ED_LOCK_DESTROY(sc);
		return (ENXIO);
	}

	callout_init_mtx(&sc->tick_ch, ED_MUTEX(sc), 0);
	/*
	 * Set interface to stopped condition (reset)
	 */
	ed_stop_hw(sc);

	/*
	 * Initialize ifnet structure
	 */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_start = ed_start;
	ifp->if_ioctl = ed_ioctl;
	ifp->if_init = ed_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof sc->mibdata;
	/*
	 * XXX - should do a better job.
	 */
	if (sc->chip_type == ED_CHIP_TYPE_WD790)
		sc->mibdata.dot3StatsEtherChipSet =
			DOT3CHIPSET(dot3VendorWesternDigital,
				    dot3ChipSetWesternDigital83C790);
	else
		sc->mibdata.dot3StatsEtherChipSet =
			DOT3CHIPSET(dot3VendorNational, 
				    dot3ChipSetNational8390);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * Set default state for LINK2 flag (used to disable the 
	 * transceiver for AUI operation), based on config option.
	 * We only set this flag before we attach the device, so there's
	 * no race.  It is convenient to allow users to turn this off
	 * by default in the kernel config, but given our more advanced
	 * boot time configuration options, this might no longer be needed.
	 */
	if (device_get_flags(dev) & ED_FLAGS_DISABLE_TRANCEIVER)
		ifp->if_flags |= IFF_LINK2;

	/*
	 * Attach the interface
	 */
	ether_ifattach(ifp, sc->enaddr);
	/* device attach does transition from UNCONFIGURED to IDLE state */

	sc->tx_mem = sc->txb_cnt * ED_PAGE_SIZE * ED_TXBUF_SIZE;
	sc->rx_mem = (sc->rec_page_stop - sc->rec_page_start) * ED_PAGE_SIZE;
	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    0, "type", CTLFLAG_RD, sc->type_str, 0,
	    "Type of chip in card");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    1, "TxMem", CTLFLAG_RD, &sc->tx_mem, 0,
	    "Memory set aside for transmitting packets");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    2, "RxMem", CTLFLAG_RD, &sc->rx_mem, 0,
	    "Memory  set aside for receiving packets");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    3, "Mem", CTLFLAG_RD, &sc->mem_size, 0,
	    "Total Card Memory");
	if (bootverbose) {
		if (sc->type_str && (*sc->type_str != 0))
			device_printf(dev, "type %s ", sc->type_str);
		else
			device_printf(dev, "type unknown (0x%x) ", sc->type);

#ifdef ED_HPP
		if (sc->vendor == ED_VENDOR_HP)
			printf("(%s %s IO)",
			    (sc->hpp_id & ED_HPP_ID_16_BIT_ACCESS) ?
			    "16-bit" : "32-bit",
			    sc->hpp_mem_start ? "memory mapped" : "regular");
		else
#endif
			printf("%s", sc->isa16bit ? "(16 bit)" : "(8 bit)");

#if defined(ED_HPP) || defined(ED_3C503)
		printf("%s", (((sc->vendor == ED_VENDOR_3COM) ||
				    (sc->vendor == ED_VENDOR_HP)) &&
			   (ifp->if_flags & IFF_LINK2)) ?
		    " tranceiver disabled" : "");
#endif
		printf("\n");
	}
	return (0);
}
Example #6
0
static int
admsw_attach(device_t dev)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = (struct admsw_softc *) device_get_softc(dev);
	struct ifnet *ifp;
	int error, i, rid;

	sc->sc_dev = dev;
	device_printf(dev, "ADM5120 Switch Engine, %d ports\n", SW_DEVS);
	sc->ndevs = 0;

	/* XXXMIPS: fix it */
	enaddr[0] = 0x00;
	enaddr[1] = 0x0C;
	enaddr[2] = 0x42;
	enaddr[3] = 0x07;
	enaddr[4] = 0xB2;
	enaddr[5] = 0x4E;

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	device_printf(sc->sc_dev, "base Ethernet address %s\n",
	    ether_sprintf(enaddr));
	callout_init(&sc->sc_watchdog, 1);

	rid = 0;
	if ((sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 
	    RF_ACTIVE)) == NULL) {
                device_printf(dev, "unable to allocate memory resource\n");
                return (ENXIO);
        }

	/* Hook up the interrupt handler. */
	rid = 0;
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 
	    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
                device_printf(dev, "unable to allocate IRQ resource\n");
                return (ENXIO);
        }

	if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET, 
	    admsw_intr, NULL, sc, &sc->sc_ih)) != 0) {
		device_printf(dev,
		    "WARNING: unable to register interrupt handler\n");
		return (error);
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dma_tag_create(NULL, 4, 0, 
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, NULL, NULL, 
	    &sc->sc_control_dmat)) != 0) {
		device_printf(sc->sc_dev, 
		    "unable to create control data DMA map, error = %d\n", 
		    error);
		return (error);
	}

	if ((error = bus_dmamem_alloc(sc->sc_control_dmat,
	    (void **)&sc->sc_control_data, BUS_DMA_NOWAIT, 
	    &sc->sc_cddmamap)) != 0) {
		device_printf(sc->sc_dev, 
		    "unable to allocate control data, error = %d\n", error);
		return (error);
	}

	if ((error = bus_dmamap_load(sc->sc_control_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data), 
	    admsw_dma_map_addr, &sc->sc_cddma, 0)) != 0) {
		device_printf(sc->sc_dev, 
		    "unable to load control data DMA map, error = %d\n", error);
		return (error);
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	if ((error = bus_dma_tag_create(NULL, 1, 0, 
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, 
	    &sc->sc_bufs_dmat)) != 0) {
		device_printf(sc->sc_dev, 
		    "unable to create control data DMA map, error = %d\n", 
		    error);
		return (error);
	}

	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev, 
			    "unable to create txh DMA map %d, error = %d\n", 
			    i, error);
			return (error);
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}

	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev, 
			    "unable to create txl DMA map %d, error = %d\n", 
			    i, error);
			return (error);
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0, 
		     &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev, 
			    "unable to create rxh DMA map %d, error = %d\n", 
			    i, error);
			return (error);
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}

	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev, 
			    "unable to create rxl DMA map %d, error = %d\n",
			    i, error);
			return (error);
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);
	admsw_reset(sc);

	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, 
		    admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], 
		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], 
		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER);

		/* Setup interface parameters */
		ifp->if_softc = sc;
		if_initname(ifp, device_get_name(dev), i);
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_output = ether_output;
		ifp->if_start = admsw_start;
		ifp->if_init = admsw_init;
		ifp->if_mtu = ETHERMTU;
		ifp->if_baudrate = IF_Mbps(100);
		IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, ifqmaxlen));
		ifp->if_snd.ifq_drv_maxlen = max(ADMSW_NTXLDESC, ifqmaxlen);
		IFQ_SET_READY(&ifp->if_snd);
		ifp->if_capabilities |= IFCAP_VLAN_MTU;

		/* Attach the interface. */
		ether_ifattach(ifp, enaddr);
		enaddr[5]++;
	}

	/* XXX: admwdog_attach(sc); */

	/* leave interrupts and cpu port disabled */
	return (0);
}
Example #7
0
static int
axgbe_attach(device_t dev)
{
	struct axgbe_softc *sc;
	struct ifnet *ifp;
	pcell_t phy_handle;
	device_t phydev;
	phandle_t node, phy_node;
	struct resource *mac_res[11];
	struct resource *phy_res[4];
	ssize_t len;
	int error, i, j;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getencprop(node, "phy-handle", &phy_handle,
	    sizeof(phy_handle)) <= 0) {
		phy_node = node;

		if (bus_alloc_resources(dev, mac_spec, mac_res)) {
			device_printf(dev,
			    "could not allocate phy resources\n");
			return (ENXIO);
		}

		sc->prv.xgmac_res = mac_res[0];
		sc->prv.xpcs_res = mac_res[1];
		sc->prv.rxtx_res = mac_res[2];
		sc->prv.sir0_res = mac_res[3];
		sc->prv.sir1_res = mac_res[4];

		sc->prv.dev_irq_res = mac_res[5];
		sc->prv.per_channel_irq = OF_hasprop(node,
		    XGBE_DMA_IRQS_PROPERTY);
		for (i = 0, j = 6; j < nitems(mac_res) - 1 &&
		    mac_res[j + 1] != NULL; i++, j++) {
			if (sc->prv.per_channel_irq) {
				sc->prv.chan_irq_res[i] = mac_res[j];
			}
		}

		/* The last entry is the auto-negotiation interrupt */
		sc->prv.an_irq_res = mac_res[j];
	} else {
		phydev = OF_device_from_xref(phy_handle);
		phy_node = ofw_bus_get_node(phydev);

		if (bus_alloc_resources(phydev, old_phy_spec, phy_res)) {
			device_printf(dev,
			    "could not allocate phy resources\n");
			return (ENXIO);
		}

		if (bus_alloc_resources(dev, old_mac_spec, mac_res)) {
			device_printf(dev,
			    "could not allocate mac resources\n");
			return (ENXIO);
		}

		sc->prv.rxtx_res = phy_res[0];
		sc->prv.sir0_res = phy_res[1];
		sc->prv.sir1_res = phy_res[2];
		sc->prv.an_irq_res = phy_res[3];

		sc->prv.xgmac_res = mac_res[0];
		sc->prv.xpcs_res = mac_res[1];
		sc->prv.dev_irq_res = mac_res[2];
		sc->prv.per_channel_irq = OF_hasprop(node,
		    XGBE_DMA_IRQS_PROPERTY);
		if (sc->prv.per_channel_irq) {
			for (i = 0, j = 3; i < nitems(sc->prv.chan_irq_res) &&
			    mac_res[j] != NULL; i++, j++) {
				sc->prv.chan_irq_res[i] = mac_res[j];
			}
		}
	}

	if ((len = OF_getproplen(node, "mac-address")) < 0) {
		device_printf(dev, "No mac-address property\n");
		return (EINVAL);
	}

	if (len != ETHER_ADDR_LEN)
		return (EINVAL);

	OF_getprop(node, "mac-address", sc->mac_addr, ETHER_ADDR_LEN);

	sc->prv.netdev = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot alloc ifnet\n");
		return (ENXIO);
	}

	sc->prv.dev = dev;
	sc->prv.dmat = bus_get_dma_tag(dev);
	sc->prv.phy.advertising = ADVERTISED_10000baseKR_Full |
	    ADVERTISED_1000baseKX_Full;


	/*
	 * Read the needed properties from the phy node.
	 */

	/* This is documented as optional, but Linux requires it */
	if (OF_getencprop(phy_node, XGBE_SPEEDSET_PROPERTY, &sc->prv.speed_set,
	    sizeof(sc->prv.speed_set)) <= 0) {
		device_printf(dev, "%s property is missing\n",
		    XGBE_SPEEDSET_PROPERTY);
		return (EINVAL);
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_BLWC_PROPERTY,
	    sc->prv.serdes_blwc, sizeof(sc->prv.serdes_blwc));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_blwc[0] = XGBE_SPEED_1000_BLWC;
		sc->prv.serdes_blwc[1] = XGBE_SPEED_2500_BLWC;
		sc->prv.serdes_blwc[2] = XGBE_SPEED_10000_BLWC;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_CDR_RATE_PROPERTY,
	    sc->prv.serdes_cdr_rate, sizeof(sc->prv.serdes_cdr_rate));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_cdr_rate[0] = XGBE_SPEED_1000_CDR;
		sc->prv.serdes_cdr_rate[1] = XGBE_SPEED_2500_CDR;
		sc->prv.serdes_cdr_rate[2] = XGBE_SPEED_10000_CDR;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_PQ_SKEW_PROPERTY,
	    sc->prv.serdes_pq_skew, sizeof(sc->prv.serdes_pq_skew));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_pq_skew[0] = XGBE_SPEED_1000_PQ;
		sc->prv.serdes_pq_skew[1] = XGBE_SPEED_2500_PQ;
		sc->prv.serdes_pq_skew[2] = XGBE_SPEED_10000_PQ;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_TX_AMP_PROPERTY,
	    sc->prv.serdes_tx_amp, sizeof(sc->prv.serdes_tx_amp));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_tx_amp[0] = XGBE_SPEED_1000_TXAMP;
		sc->prv.serdes_tx_amp[1] = XGBE_SPEED_2500_TXAMP;
		sc->prv.serdes_tx_amp[2] = XGBE_SPEED_10000_TXAMP;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_CFG_PROPERTY,
	    sc->prv.serdes_dfe_tap_cfg, sizeof(sc->prv.serdes_dfe_tap_cfg));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_dfe_tap_cfg[0] = XGBE_SPEED_1000_DFE_TAP_CONFIG;
		sc->prv.serdes_dfe_tap_cfg[1] = XGBE_SPEED_2500_DFE_TAP_CONFIG;
		sc->prv.serdes_dfe_tap_cfg[2] = XGBE_SPEED_10000_DFE_TAP_CONFIG;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_ENA_PROPERTY,
	    sc->prv.serdes_dfe_tap_ena, sizeof(sc->prv.serdes_dfe_tap_ena));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_dfe_tap_ena[0] = XGBE_SPEED_1000_DFE_TAP_ENABLE;
		sc->prv.serdes_dfe_tap_ena[1] = XGBE_SPEED_2500_DFE_TAP_ENABLE;
		sc->prv.serdes_dfe_tap_ena[2] = XGBE_SPEED_10000_DFE_TAP_ENABLE;
	}

	/* Check if the NIC is DMA coherent */
	sc->prv.coherent = OF_hasprop(node, "dma-coherent");
	if (sc->prv.coherent) {
		sc->prv.axdomain = XGBE_DMA_OS_AXDOMAIN;
		sc->prv.arcache = XGBE_DMA_OS_ARCACHE;
		sc->prv.awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		sc->prv.axdomain = XGBE_DMA_SYS_AXDOMAIN;
		sc->prv.arcache = XGBE_DMA_SYS_ARCACHE;
		sc->prv.awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Create the lock & workqueues */
	spin_lock_init(&sc->prv.xpcs_lock);
	sc->prv.dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->prv.dev_workqueue);
	taskqueue_start_threads(&sc->prv.dev_workqueue, 1, PI_NET,
	    "axgbe taskq");

	/* Set the needed pointers */
	xgbe_init_function_ptrs_phy(&sc->prv.phy_if);
	xgbe_init_function_ptrs_dev(&sc->prv.hw_if);
	xgbe_init_function_ptrs_desc(&sc->prv.desc_if);

	/* Reset the hardware */
	sc->prv.hw_if.exit(&sc->prv);

	/* Read the hardware features */
	xgbe_get_all_hw_features(&sc->prv);

	/* Set default values */
	sc->prv.pblx8 = DMA_PBL_X8_ENABLE;
	sc->prv.tx_desc_count = XGBE_TX_DESC_CNT;
	sc->prv.tx_sf_mode = MTL_TSF_ENABLE;
	sc->prv.tx_threshold = MTL_TX_THRESHOLD_64;
	sc->prv.tx_pbl = DMA_PBL_16;
	sc->prv.tx_osp_mode = DMA_OSP_ENABLE;
	sc->prv.rx_desc_count = XGBE_RX_DESC_CNT;
	sc->prv.rx_sf_mode = MTL_RSF_DISABLE;
	sc->prv.rx_threshold = MTL_RX_THRESHOLD_64;
	sc->prv.rx_pbl = DMA_PBL_16;
	sc->prv.pause_autoneg = 1;
	sc->prv.tx_pause = 1;
	sc->prv.rx_pause = 1;
	sc->prv.phy_speed = SPEED_UNKNOWN;
	sc->prv.power_down = 0;

	/* TODO: Limit to min(ncpus, hw rings) */
	sc->prv.tx_ring_count = 1;
	sc->prv.tx_q_count = 1;
	sc->prv.rx_ring_count = 1;
	sc->prv.rx_q_count = sc->prv.hw_feat.rx_q_cnt;
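	/*
	 * Sketch only (not in the original driver): one way to honor the
	 * TODO above would be to clamp the ring counts to the CPU count,
	 * assuming mp_ncpus from <sys/smp.h> and per-direction channel
	 * counts in hw_feat, e.g.:
	 *
	 *	sc->prv.tx_ring_count = min(mp_ncpus, sc->prv.hw_feat.tx_ch_cnt);
	 *	sc->prv.rx_ring_count = min(mp_ncpus, sc->prv.hw_feat.rx_ch_cnt);
	 */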

	/* Init the PHY */
	sc->prv.phy_if.phy_init(&sc->prv);

	/* Set the coalescing */
	xgbe_init_rx_coalesce(&sc->prv);
	xgbe_init_tx_coalesce(&sc->prv);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = axgbe_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = axgbe_ioctl;
	ifp->if_transmit = xgbe_xmit;
	ifp->if_qflush = axgbe_qflush;
	ifp->if_get_counter = axgbe_get_counter;

	/* TODO: Support HW offload */
	ifp->if_capabilities = 0;
	ifp->if_capenable = 0;
	ifp->if_hwassist = 0;

	ether_ifattach(ifp, sc->mac_addr);

	ifmedia_init(&sc->media, IFM_IMASK, axgbe_media_change,
	    axgbe_media_status);
#ifdef notyet
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
#endif
	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	set_bit(XGBE_DOWN, &sc->prv.dev_state);

	if (xgbe_open(ifp) < 0) {
		device_printf(dev, "ndo_open failed\n");
		return (ENXIO);
	}

	return (0);
}
Example #8
0
static int
gx_attach(device_t dev)
{
	struct ifnet *ifp;
	struct gx_softc *sc;
	uint8_t mac[6];
	int error;
	int rid;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_port = device_get_unit(dev);

	/* Read MAC address.  */
	GXEMUL_ETHER_DEV_WRITE(GXEMUL_ETHER_DEV_MAC, (uintptr_t)mac);

	/* Allocate and establish interrupt.  */
	rid = 0;
	sc->sc_intr = bus_alloc_resource(sc->sc_dev, SYS_RES_IRQ, &rid,
	    GXEMUL_ETHER_DEV_IRQ - 2, GXEMUL_ETHER_DEV_IRQ - 2, 1, RF_ACTIVE);
	if (sc->sc_intr == NULL) {
		device_printf(dev, "unable to allocate IRQ.\n");
		return (ENXIO);
	}

	error = bus_setup_intr(sc->sc_dev, sc->sc_intr, INTR_TYPE_NET, NULL,
	    gx_rx_intr, sc, &sc->sc_intr_cookie);
	if (error != 0) {
		device_printf(dev, "unable to setup interrupt.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENXIO);
	}

	bus_describe_intr(sc->sc_dev, sc->sc_intr, sc->sc_intr_cookie, "rx");

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = gx_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
	ifp->if_ioctl = gx_ioctl;

	sc->sc_ifp = ifp;
	sc->sc_flags = ifp->if_flags;

	ifmedia_init(&sc->sc_ifmedia, 0, gx_medchange, gx_medstat);

	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	mtx_init(&sc->sc_mtx, "GXemul Ethernet", NULL, MTX_DEF);

	ether_ifattach(ifp, mac);

	ifp->if_transmit = gx_transmit;

	return (bus_generic_attach(dev));
}
Example #9
0
static int
usie_attach(device_t self)
{
    struct usie_softc *sc = device_get_softc(self);
    struct usb_attach_arg *uaa = device_get_ivars(self);
    struct ifnet *ifp;
    struct usb_interface *iface;
    struct usb_interface_descriptor *id;
    struct usb_device_request req;
    int err;
    uint16_t fwattr;
    uint8_t iface_index;
    uint8_t ifidx;
    uint8_t start;

    device_set_usb_desc(self);
    sc->sc_udev = uaa->device;
    sc->sc_dev = self;

    mtx_init(&sc->sc_mtx, "usie", MTX_NETWORK_LOCK, MTX_DEF);
    ucom_ref(&sc->sc_super_ucom);

    TASK_INIT(&sc->sc_if_status_task, 0, usie_if_status_cb, sc);
    TASK_INIT(&sc->sc_if_sync_task, 0, usie_if_sync_cb, sc);

    usb_callout_init_mtx(&sc->sc_if_sync_ch, &sc->sc_mtx, 0);

    mtx_lock(&sc->sc_mtx);

    /* set power mode to D0 */
    req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
    req.bRequest = USIE_POWER;
    USETW(req.wValue, 0);
    USETW(req.wIndex, 0);
    USETW(req.wLength, 0);
    if (usie_do_request(sc, &req, NULL)) {
        mtx_unlock(&sc->sc_mtx);
        goto detach;
    }
    /* read fw attr */
    fwattr = 0;
    req.bmRequestType = UT_READ_VENDOR_DEVICE;
    req.bRequest = USIE_FW_ATTR;
    USETW(req.wValue, 0);
    USETW(req.wIndex, 0);
    USETW(req.wLength, sizeof(fwattr));
    if (usie_do_request(sc, &req, &fwattr)) {
        mtx_unlock(&sc->sc_mtx);
        goto detach;
    }
    mtx_unlock(&sc->sc_mtx);

    /* check DHCP supports */
    DPRINTF("fwattr=%x\n", fwattr);
    if (!(fwattr & USIE_FW_DHCP)) {
        device_printf(self, "DHCP is not supported. A firmware upgrade might be needed.\n");
    }

    /* find available interfaces */
    sc->sc_nucom = 0;
    for (ifidx = 0; ifidx < USIE_IFACE_MAX; ifidx++) {
        iface = usbd_get_iface(uaa->device, ifidx);
        if (iface == NULL)
            break;

        id = usbd_get_interface_descriptor(iface);
        if ((id == NULL) || (id->bInterfaceClass != UICLASS_VENDOR))
            continue;

        /* setup Direct IP transfer */
        if (id->bInterfaceNumber >= 7 && id->bNumEndpoints == 3) {
            sc->sc_if_ifnum = id->bInterfaceNumber;
            iface_index = ifidx;

            DPRINTF("ifnum=%d, ifidx=%d\n",
                    sc->sc_if_ifnum, ifidx);

            err = usbd_transfer_setup(uaa->device,
                                      &iface_index, sc->sc_if_xfer, usie_if_config,
                                      USIE_IF_N_XFER, sc, &sc->sc_mtx);

            if (err == 0)
                continue;

            device_printf(self,
                          "could not allocate USB transfers on "
                          "iface_index=%d, err=%s\n",
                          iface_index, usbd_errstr(err));
            goto detach;
        }

        /* setup ucom */
        if (sc->sc_nucom >= USIE_UCOM_MAX)
            continue;

        usbd_set_parent_iface(uaa->device, ifidx,
                              uaa->info.bIfaceIndex);

        DPRINTF("NumEndpoints=%d bInterfaceNumber=%d\n",
                id->bNumEndpoints, id->bInterfaceNumber);

        if (id->bNumEndpoints == 2) {
            sc->sc_uc_xfer[sc->sc_nucom][0] = NULL;
            start = 1;
        } else
            start = 0;

        err = usbd_transfer_setup(uaa->device, &ifidx,
                                  sc->sc_uc_xfer[sc->sc_nucom] + start,
                                  usie_uc_config + start, USIE_UC_N_XFER - start,
                                  &sc->sc_ucom[sc->sc_nucom], &sc->sc_mtx);

        if (err != 0) {
            DPRINTF("usbd_transfer_setup error=%s\n", usbd_errstr(err));
            continue;
        }

        mtx_lock(&sc->sc_mtx);
        for (; start < USIE_UC_N_XFER; start++)
            usbd_xfer_set_stall(sc->sc_uc_xfer[sc->sc_nucom][start]);
        mtx_unlock(&sc->sc_mtx);

        sc->sc_uc_ifnum[sc->sc_nucom] = id->bInterfaceNumber;

        sc->sc_nucom++;		/* found a port */
    }

    if (sc->sc_nucom == 0) {
        device_printf(self, "no comports found\n");
        goto detach;
    }

    err = ucom_attach(&sc->sc_super_ucom, sc->sc_ucom,
                      sc->sc_nucom, sc, &usie_uc_callback, &sc->sc_mtx);

    if (err != 0) {
        DPRINTF("ucom_attach failed\n");
        goto detach;
    }
    DPRINTF("Found %d interfaces.\n", sc->sc_nucom);

    /* setup ifnet (Direct IP) */
    sc->sc_ifp = ifp = if_alloc(IFT_OTHER);

    if (ifp == NULL) {
        device_printf(self, "Could not allocate a network interface\n");
        goto detach;
    }
    if_initname(ifp, "usie", device_get_unit(self));

    ifp->if_softc = sc;
    ifp->if_mtu = USIE_MTU_MAX;
    ifp->if_flags |= IFF_NOARP;
    ifp->if_init = usie_if_init;
    ifp->if_ioctl = usie_if_ioctl;
    ifp->if_start = usie_if_start;
    ifp->if_output = usie_if_output;
    IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
    ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
    IFQ_SET_READY(&ifp->if_snd);

    if_attach(ifp);
    bpfattach(ifp, DLT_RAW, 0);

    if (fwattr & USIE_PM_AUTO) {
        usbd_set_power_mode(uaa->device, USB_POWER_MODE_SAVE);
        DPRINTF("enabling automatic suspend and resume\n");
    } else {
        usbd_set_power_mode(uaa->device, USB_POWER_MODE_ON);
        DPRINTF("USB power is always ON\n");
    }

    DPRINTF("device attached\n");
    return (0);

detach:
    usie_detach(self);
    return (ENOMEM);
}
Example #10
0
static int
vtbe_attach(device_t dev)
{
	uint8_t macaddr[ETHER_ADDR_LEN];
	struct vtbe_softc *sc;
	struct ifnet *ifp;
	int reg;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->hdrsize = sizeof(struct virtio_net_hdr_mrg_rxbuf);

	if (bus_alloc_resources(dev, vtbe_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);

	if (setup_offset(dev, &sc->beri_mem_offset) != 0)
		return (ENXIO);
	if (setup_pio(dev, "pio-send", &sc->pio_send) != 0)
		return (ENXIO);
	if (setup_pio(dev, "pio-recv", &sc->pio_recv) != 0)
		return (ENXIO);

	/* Setup MMIO */

	/* Specify that we provide network device */
	reg = htobe32(VIRTIO_ID_NETWORK);
	WRITE4(sc, VIRTIO_MMIO_DEVICE_ID, reg);

	/* The number of desc we support */
	reg = htobe32(DESC_COUNT);
	WRITE4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX, reg);

	/* Our features */
	reg = htobe32(VIRTIO_NET_F_MAC |
			VIRTIO_NET_F_MRG_RXBUF |
			VIRTIO_F_NOTIFY_ON_EMPTY);
	WRITE4(sc, VIRTIO_MMIO_HOST_FEATURES, reg);

	/* Get MAC */
	if (vtbe_get_hwaddr(sc, macaddr)) {
		device_printf(sc->dev, "can't get mac\n");
		return (ENXIO);
	}

	/* Set up the ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX |
			 IFF_MULTICAST | IFF_PROMISC);
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_start = vtbe_txstart;
	ifp->if_ioctl = vtbe_ioctl;
	ifp->if_init = vtbe_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, DESC_COUNT - 1);
	ifp->if_snd.ifq_drv_maxlen = DESC_COUNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, macaddr);

	sc->is_attached = true;

	return (0);
}
Example #11
0
static void
ue_attach_post_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	struct ifnet *ifp;
	int error;
	char num[14];			/* sufficient for 32 bits */

	/* first call driver's post attach routine */
	ue->ue_methods->ue_attach_post(ue);

	UE_UNLOCK(ue);

	ue->ue_unit = alloc_unr(ueunit);
	usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_mtx, 0);
	sysctl_ctx_init(&ue->ue_sysctl_ctx);

	error = 0;
	CURVNET_SET_QUIET(vnet0);
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(ue->ue_dev, "could not allocate ifnet\n");
		goto fail;
	}

	ifp->if_softc = ue;
	if_initname(ifp, "ue", ue->ue_unit);
	if (ue->ue_methods->ue_attach_post_sub != NULL) {
		ue->ue_ifp = ifp;
		error = ue->ue_methods->ue_attach_post_sub(ue);
	} else {
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		if (ue->ue_methods->ue_ioctl != NULL)
			ifp->if_ioctl = ue->ue_methods->ue_ioctl;
		else
			ifp->if_ioctl = uether_ioctl;
		ifp->if_start = ue_start;
		ifp->if_init = ue_init;
		IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
		ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
		IFQ_SET_READY(&ifp->if_snd);
		ue->ue_ifp = ifp;

		if (ue->ue_methods->ue_mii_upd != NULL &&
		    ue->ue_methods->ue_mii_sts != NULL) {
			/* device_xxx() depends on this */
			mtx_lock(&Giant);
			error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
			    ue_ifmedia_upd, ue->ue_methods->ue_mii_sts,
			    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
			mtx_unlock(&Giant);
		}
	}

	if (error) {
		device_printf(ue->ue_dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_printf(ifp, "<USB Ethernet> on %s\n", device_get_nameunit(ue->ue_dev));
	ether_ifattach(ifp, ue->ue_eaddr);
	/* Tell upper layer we support VLAN oversized frames. */
	if (ifp->if_capabilities & IFCAP_VLAN_MTU)
		ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	CURVNET_RESTORE();

	snprintf(num, sizeof(num), "%u", ue->ue_unit);
	ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx,
	    &SYSCTL_NODE_CHILDREN(_net, ue),
	    OID_AUTO, num, CTLFLAG_RD, NULL, "");
	SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx,
	    SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO,
	    "%parent", CTLTYPE_STRING | CTLFLAG_RD, ue, 0,
	    ue_sysctl_parent, "A", "parent device");

	UE_LOCK(ue);
	return;

fail:
	CURVNET_RESTORE();
	free_unr(ueunit, ue->ue_unit);
	if (ue->ue_ifp != NULL) {
		if_free(ue->ue_ifp);
		ue->ue_ifp = NULL;
	}
	UE_LOCK(ue);
	return;
}
Example #12
0
static int
fwip_attach(device_t dev)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int unit, s;
	struct fw_hwaddr *hwaddr;

	fwip = ((struct fwip_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);
	ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394);
	if (ifp == NULL)
		return (ENOSPC);

	mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF);
	/* XXX */
	fwip->dma_ch = -1;

	fwip->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwip->fd.fc->speed;

	fwip->fd.dev = dev;
	fwip->fd.post_explore = NULL;
	fwip->fd.post_busreset = fwip_post_busreset;
	fwip->fw_softc.fwip = fwip;
	TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip);

	/*
	 * Encode our hardware the way that arp likes it.
	 */
	hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr;
	hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi);
	hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo);
	hwaddr->sender_max_rec = fwip->fd.fc->maxrec;
	hwaddr->sspd = fwip->fd.fc->speed;
	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);

	/* fill the rest and attach interface */	
	ifp->if_softc = &fwip->fw_softc;

#if __FreeBSD_version >= 501113 || defined(__DragonFly__)
	if_initname(ifp, device_get_name(dev), unit);
#else
	ifp->if_unit = unit;
	ifp->if_name = "fwip";
#endif
	ifp->if_init = fwip_init;
	ifp->if_start = fwip_start;
	ifp->if_ioctl = fwip_ioctl;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	s = splimp();
	firewire_ifattach(ifp, hwaddr);
	splx(s);

	FWIPDEBUG(ifp, "interface created\n");
	return 0;
}
Example #13
0
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	struct ifnet *ifp;
	int rc, unit;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	if (sc->ifp != NULL) {
		rc = EEXIST;
		goto done;
	}
	if (sc->traceq < 0) {
		rc = EAGAIN;
		goto done;
	}


	unit = -1;
	rc = ifc_alloc_unit(ifc, &unit);
	if (rc != 0)
		goto done;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		rc = ENOMEM;
		goto done;
	}

	/* Note that if_xname is not <if_dname><if_dunit>. */
	strlcpy(ifp->if_xname, name, sizeof(ifp->if_xname));
	ifp->if_dname = t4_cloner_name;
	ifp->if_dunit = unit;
	ifp->if_init = tracer_init;
	ifp->if_flags = IFF_SIMPLEX | IFF_DRV_RUNNING;
	ifp->if_ioctl = tracer_ioctl;
	ifp->if_transmit = tracer_transmit;
	ifp->if_qflush = tracer_qflush;
	ifp->if_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
	ether_ifattach(ifp, lla);

	mtx_lock(&sc->ifp_lock);
	ifp->if_softc = sc;
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}
Example #14
0
static int
fwe_attach(device_t dev)
{
	struct fwe_softc *fwe;
	struct ifnet *ifp;
	int unit, s;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	u_char *eaddr;
#else
	u_char eaddr[6];
#endif
	struct fw_eui64 *eui;

	fwe = ((struct fwe_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);

	bzero(fwe, sizeof(struct fwe_softc));
	mtx_init(&fwe->mtx, "fwe", NULL, MTX_DEF);
	/* XXX */
	fwe->stream_ch = stream_ch;
	fwe->dma_ch = -1;

	fwe->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwe->fd.fc->speed;

	fwe->fd.dev = dev;
	fwe->fd.post_explore = NULL;
	fwe->eth_softc.fwe = fwe;

	fwe->pkt_hdr.mode.stream.tcode = FWTCODE_STREAM;
	fwe->pkt_hdr.mode.stream.sy = 0;
	fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;

	/* generate fake MAC address: first and last 3bytes from eui64 */
#define LOCAL (0x02)
#define GROUP (0x01)
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	eaddr = &IFP2ENADDR(fwe->eth_softc.ifp)[0];
#endif


	eui = &fwe->fd.fc->eui;
	eaddr[0] = (FW_EUI64_BYTE(eui, 0) | LOCAL) & ~GROUP;
	eaddr[1] = FW_EUI64_BYTE(eui, 1);
	eaddr[2] = FW_EUI64_BYTE(eui, 2);
	eaddr[3] = FW_EUI64_BYTE(eui, 5);
	eaddr[4] = FW_EUI64_BYTE(eui, 6);
	eaddr[5] = FW_EUI64_BYTE(eui, 7);
	printf("if_fwe%d: Fake Ethernet address: "
		"%02x:%02x:%02x:%02x:%02x:%02x\n", unit,
		eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);

	/* fill the rest and attach interface */	
	ifp = fwe->eth_softc.ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		return (ENOSPC);
	}
	ifp->if_softc = &fwe->eth_softc;

#if __FreeBSD_version >= 501113 || defined(__DragonFly__)
	if_initname(ifp, device_get_name(dev), unit);
#else
	ifp->if_unit = unit;
	ifp->if_name = "fwe";
#endif
	ifp->if_init = fwe_init;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	ifp->if_output = ether_output;
#endif
	ifp->if_start = fwe_start;
	ifp->if_ioctl = fwe_ioctl;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;

	s = splimp();
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	ether_ifattach(ifp, 1);
#else
	ether_ifattach(ifp, eaddr);
#endif
	splx(s);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_POLLING;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
#endif


	FWEDEBUG(ifp, "interface created\n");
	return 0;
}
Example #15
0
/*
 * Create an interface instance.
 */
static int
edsc_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct edsc_softc	*sc;
	struct ifnet		*ifp;
	static u_char		 eaddr[ETHER_ADDR_LEN];	/* 0:0:0:0:0:0 */

	/*
	 * Allocate soft and ifnet structures.  Link each to the other.
	 */
	sc = malloc(sizeof(struct edsc_softc), M_EDSC, M_WAITOK | M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_EDSC);
		return (ENOSPC);
	}

	ifp->if_softc = sc;

	/*
	 * Get a name for this particular interface in its ifnet structure.
	 */
	if_initname(ifp, edscname, unit);

	/*
	 * Typical Ethernet interface flags: we can do broadcast and
	 * multicast but can't hear our own broadcasts or multicasts.
	 */
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;

	/*
	 * We can pretend we have the whole set of hardware features
	 * because we just discard all packets we get from the upper layer.
	 * However, the features are disabled initially.  They can be
	 * enabled via edsc_ioctl() when needed.
	 */
	ifp->if_capabilities =
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM |
	    IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_JUMBO_MTU;
	ifp->if_capenable = 0;

	/*
	 * Set the interface driver methods.
	 */
	ifp->if_init = edsc_init;
	/* ifp->if_input = edsc_input; */
	ifp->if_ioctl = edsc_ioctl;
	ifp->if_start = edsc_start;

	/*
	 * Set the maximum output queue length from the global parameter.
	 */
	ifp->if_snd.ifq_maxlen = ifqmaxlen;

	/*
	 * Do ifnet initializations common to all Ethernet drivers
	 * and attach to the network interface framework.
	 * TODO: Pick a non-zero link level address.
	 */
	ether_ifattach(ifp, eaddr);

	/*
	 * Now we can mark the interface as running, i.e., ready
	 * for operation.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return (0);
}
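One possible way to satisfy the "pick a non-zero link level address" TODO
above, sketched under the assumption that the kernel arc4random() interface
is available; the edsc_fake_lladdr helper name is hypothetical and not part
of the original driver.

/*
 * Sketch only: fill in a random locally-administered, unicast MAC so a
 * cloned edsc interface does not use the all-zero address noted above.
 */
static void
edsc_fake_lladdr(u_char eaddr[ETHER_ADDR_LEN])
{
	uint32_t rnd;

	rnd = arc4random();
	eaddr[0] = 0x02;	/* locally administered, unicast */
	eaddr[1] = 0x00;
	eaddr[2] = 0x00;
	eaddr[3] = (rnd >> 16) & 0xff;
	eaddr[4] = (rnd >> 8) & 0xff;
	eaddr[5] = rnd & 0xff;
}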
Example #16
0
int
smc_attach(device_t dev)
{
	int			type, error;
	uint16_t		val;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct smc_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	error = 0;

	sc->smc_dev = dev;

	ifp = sc->smc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		error = ENOSPC;
		goto done;
	}

	mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	/* Set up watchdog callout. */
	callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0);

	type = SYS_RES_IOPORT;
	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	sc->smc_reg_rid = 0;
	sc->smc_reg = bus_alloc_resource(dev, type, &sc->smc_reg_rid, 0, ~0,
	    16, RF_ACTIVE);
	if (sc->smc_reg == NULL) {
		error = ENXIO;
		goto done;
	}

	sc->smc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 0,
	    ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->smc_irq == NULL) {
		error = ENXIO;
		goto done;
	}

	SMC_LOCK(sc);
	smc_reset(sc);
	SMC_UNLOCK(sc);

	smc_select_bank(sc, 3);
	val = smc_read_2(sc, REV);
	sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	sc->smc_rev = (val & REV_REV_MASK) >> REV_REV_SHIFT;
	if (bootverbose)
		device_printf(dev, "revision %x\n", sc->smc_rev);

	callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx,
	    CALLOUT_RETURNUNLOCKED);
	if (sc->smc_chip >= REV_CHIP_91110FD) {
		(void)mii_attach(dev, &sc->smc_miibus, ifp,
		    smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (sc->smc_miibus != NULL) {
			sc->smc_mii_tick = smc_mii_tick;
			sc->smc_mii_mediachg = smc_mii_mediachg;
			sc->smc_mii_mediaioctl = smc_mii_mediaioctl;
		}
	}

	smc_select_bank(sc, 1);
	eaddr[0] = smc_read_1(sc, IAR0);
	eaddr[1] = smc_read_1(sc, IAR1);
	eaddr[2] = smc_read_1(sc, IAR2);
	eaddr[3] = smc_read_1(sc, IAR3);
	eaddr[4] = smc_read_1(sc, IAR4);
	eaddr[5] = smc_read_1(sc, IAR5);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = smc_init;
	ifp->if_ioctl = smc_ioctl;
	ifp->if_start = smc_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ether_ifattach(ifp, eaddr);

	/* Set up taskqueue */
	TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
	TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
	TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->smc_tq);
	taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->smc_dev));

	/* Mask all interrupts. */
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);

	/* Wire up interrupt */
	error = bus_setup_intr(dev, sc->smc_irq,
	    INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih);
	if (error != 0)
		goto done;

done:
	if (error != 0)
		smc_detach(dev);
	return (error);
}
Example #17
0
#ifdef __FreeBSD__
int
pflog_clone_create(struct if_clone *ifc, int unit, caddr_t params)
#else
int
pflog_clone_create(struct if_clone *ifc, int unit)
#endif
{
	struct ifnet *ifp;
	struct pflog_softc *pflogif;
	int s;

	if (unit >= PFLOGIFS_MAX)
		return (EINVAL);

	if ((pflogif = malloc(sizeof(*pflogif),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	pflogif->sc_unit = unit;
#ifdef __FreeBSD__
	ifp = pflogif->sc_ifp = if_alloc(IFT_PFLOG);
	if (ifp == NULL) {
		free(pflogif, M_DEVBUF);
		return (ENOSPC);
	}
	if_initname(ifp, ifc->ifc_name, unit);
#else
	ifp = &pflogif->sc_if;
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pflog%d", unit);
#endif
	ifp->if_softc = pflogif;
	ifp->if_mtu = PFLOGMTU;
	ifp->if_ioctl = pflogioctl;
	ifp->if_output = pflogoutput;
	ifp->if_start = pflogstart;
#ifndef __FreeBSD__
	ifp->if_type = IFT_PFLOG;
#endif
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = PFLOG_HDRLEN;
	if_attach(ifp);
#ifndef __FreeBSD__
	if_alloc_sadl(ifp);
#endif

#if NBPFILTER > 0
#ifdef __FreeBSD__
	bpfattach(ifp, DLT_PFLOG, PFLOG_HDRLEN);
#else
	bpfattach(&pflogif->sc_if.if_bpf, ifp, DLT_PFLOG, PFLOG_HDRLEN);
#endif
#endif

	s = splnet();
#ifdef __FreeBSD__
	/* XXX: Why pf(4) lock?! Better add a pflog lock?! */
	PF_LOCK();
#endif
	LIST_INSERT_HEAD(&pflogif_list, pflogif, sc_list);
	pflogifs[unit] = ifp;
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (0);
}
Example #18
0
int
ex_attach(device_t dev)
{
	struct ex_softc *	sc = device_get_softc(dev);
	struct ifnet *		ifp;
	struct ifmedia *	ifm;
	int			error;
	uint16_t		temp;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		return (ENOSPC);
	}
	/* work out which set of irq <-> internal tables to use */
	if (ex_card_type(sc->enaddr) == CARD_TYPE_EX_10_PLUS) {
		sc->irq2ee = plus_irq2eemap;
		sc->ee2irq = plus_ee2irqmap;
	} else {
		sc->irq2ee = irq2eemap;
		sc->ee2irq = ee2irqmap;
	}

	sc->mem_size = CARD_RAM_SIZE;	/* XXX This should be read from the card itself. */

	/*
	 * Initialize the ifnet structure.
	 */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_init = ex_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);

	ifmedia_init(&sc->ifmedia, 0, ex_ifmedia_upd, ex_ifmedia_sts);
	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->timer, &sc->lock, 0);

	temp = ex_eeprom_read(sc, EE_W5);
	if (temp & EE_W5_PORT_TPE)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	if (temp & EE_W5_PORT_BNC)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
	if (temp & EE_W5_PORT_AUI)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);

	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
	ifmedia_set(&sc->ifmedia, ex_get_media(sc));

	ifm = &sc->ifmedia;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;	
	ex_ifmedia_upd(ifp);

	/*
	 * Attach the interface.
	 */
	ether_ifattach(ifp, sc->enaddr);

	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
				NULL, ex_intr, (void *)sc, &sc->ih);
	if (error) {
		device_printf(dev, "bus_setup_intr() failed!\n");
		ether_ifdetach(ifp);
		mtx_destroy(&sc->lock);
		return (error);
	}

	return(0);
}
Example #19
0
static int
kr_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	struct kr_softc		*sc;
	int			error = 0, rid;
	int			unit;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->kr_dev = dev;

	mtx_init(&sc->kr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->kr_stat_callout, &sc->kr_mtx, 0);
	TASK_INIT(&sc->kr_link_task, 0, kr_link_task, sc);
	pci_enable_busmaster(dev);

	/* Map control/status registers. */
	sc->kr_rid = 0;
	sc->kr_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->kr_rid, 
	    RF_ACTIVE);

	if (sc->kr_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->kr_btag = rman_get_bustag(sc->kr_res);
	sc->kr_bhandle = rman_get_bushandle(sc->kr_res);

	/* Allocate interrupts */
	rid = 0;
	sc->kr_rx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_RX_IRQ,
	    KR_RX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_irq == NULL) {
		device_printf(dev, "couldn't map rx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_TX_IRQ,
	    KR_TX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_irq == NULL) {
		device_printf(dev, "couldn't map tx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_rx_und_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 
	    KR_RX_UND_IRQ, KR_RX_UND_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_und_irq == NULL) {
		device_printf(dev, "couldn't map rx underrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_ovr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 
	    KR_TX_OVR_IRQ, KR_TX_OVR_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_ovr_irq == NULL) {
		device_printf(dev, "couldn't map tx overrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->kr_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kr_ioctl;
	ifp->if_start = kr_start;
	ifp->if_init = kr_init;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, 9);
	ifp->if_snd.ifq_maxlen = 9;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capenable = ifp->if_capabilities;

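	/* XXX: hard-coded MAC address; presumably it should be read from board configuration. */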
	eaddr[0] = 0x00;
	eaddr[1] = 0x0C;
	eaddr[2] = 0x42;
	eaddr[3] = 0x09;
	eaddr[4] = 0x5E;
	eaddr[5] = 0x6B;

	if (kr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* TODO: calculate prescale */
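	/* Presumably 165000000 is the master clock here; the mask keeps the divider even. */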
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

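	/* This appears to pulse the MII management interface reset before the PHY attach below. */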
	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* Do MII setup. */
	error = mii_attach(dev, &sc->kr_miibus, ifp, kr_ifmedia_upd,
	    kr_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->kr_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_rx_intr, sc, &sc->kr_rx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_tx_intr, sc, &sc->kr_tx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_rx_und_irq, 
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_rx_und_intr, sc, 
	    &sc->kr_rx_und_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx underrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_ovr_irq, 
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_tx_ovr_intr, sc, 
	    &sc->kr_tx_ovr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx overrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error) 
		kr_detach(dev);

	return (error);
}
Example #20
0
static int
octm_attach(device_t dev)
{
	struct ifnet *ifp;
	struct octm_softc *sc;
	cvmx_mixx_irhwm_t mixx_irhwm;
	cvmx_mixx_intena_t mixx_intena;
	uint64_t mac;
	int error;
	int irq;
	int rid;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_port = device_get_unit(dev);

	switch (sc->sc_port) {
	case 0:
		irq = OCTEON_IRQ_MII;
		break;
	case 1:
		irq = OCTEON_IRQ_MII1;
		break;
	default:
		device_printf(dev, "unsupported management port %u.\n", sc->sc_port);
		return (ENXIO);
	}

	/*
	 * Set MAC address for this management port.
	 */
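	/* The per-port address is the board's base MAC plus the management-port index. */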
	mac = 0;
	memcpy((u_int8_t *)&mac + 2, cvmx_sysinfo_get()->mac_addr_base, 6);
	mac += sc->sc_port;

	cvmx_mgmt_port_set_mac(sc->sc_port, mac);

	/* No watermark for input ring.  */
	mixx_irhwm.u64 = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(sc->sc_port), mixx_irhwm.u64);

	/* Enable input ring interrupts.  */
	mixx_intena.u64 = 0;
	mixx_intena.s.ithena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(sc->sc_port), mixx_intena.u64);

	/* Allocate and establish interrupt.  */
	rid = 0;
	sc->sc_intr = bus_alloc_resource(sc->sc_dev, SYS_RES_IRQ, &rid,
	    irq, irq, 1, RF_ACTIVE);
	if (sc->sc_intr == NULL) {
		device_printf(dev, "unable to allocate IRQ.\n");
		return (ENXIO);
	}

	error = bus_setup_intr(sc->sc_dev, sc->sc_intr, INTR_TYPE_NET, NULL,
	    octm_rx_intr, sc, &sc->sc_intr_cookie);
	if (error != 0) {
		device_printf(dev, "unable to setup interrupt.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENXIO);
	}

	bus_describe_intr(sc->sc_dev, sc->sc_intr, sc->sc_intr_cookie, "rx");

	/* XXX Possibly should enable TX interrupts.  */

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = octm_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
	ifp->if_ioctl = octm_ioctl;

	sc->sc_ifp = ifp;
	sc->sc_flags = ifp->if_flags;

	ifmedia_init(&sc->sc_ifmedia, 0, octm_medchange, octm_medstat);

	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

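	/* The station address occupies bytes 2..7 of the 64-bit 'mac', hence the offset. */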
	ether_ifattach(ifp, (const u_int8_t *)&mac + 2);

	ifp->if_transmit = octm_transmit;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	IFQ_SET_MAXLEN(&ifp->if_snd, CVMX_MGMT_PORT_NUM_TX_BUFFERS);
	ifp->if_snd.ifq_drv_maxlen = CVMX_MGMT_PORT_NUM_TX_BUFFERS;
	IFQ_SET_READY(&ifp->if_snd);

	return (bus_generic_attach(dev));
}
Example #21
0
static int
loop_clone_create(struct if_clone *ifc, int unit)
{
	struct ifnet *ifp;
#ifdef T2EX
	int error;
#endif

	ifp = if_alloc(IFT_LOOP);
#ifdef T2EX
	if ( ifp == NULL ) {
		error = ENOMEM;
		goto err_ret0;
	}
#endif

	if_initname(ifp, ifc->ifc_name, unit);

	ifp->if_mtu = LOMTU;
	ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST | IFF_RUNNING;
	ifp->if_ioctl = loioctl;
	ifp->if_output = looutput;
#ifdef ALTQ
	ifp->if_start = lostart;
#endif
	ifp->if_type = IFT_LOOP;
	ifp->if_hdrlen = 0;
	ifp->if_addrlen = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
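	/* Remember the first instance as the system lo0 interface. */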
	if (unit == 0)
		lo0ifp = ifp;
#ifndef T2EX
	if_attach(ifp);
	if_alloc_sadl(ifp);
#else
	error = if_attach(ifp);
	if ( error != 0 ) {
		goto err_ret1;
	}
	error = if_alloc_sadl(ifp);
	if ( error != 0 ) {
		goto err_ret2;
	}
#endif
#if NBPFILTER > 0
#ifndef T2EX
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
#else
	error = bpfattach(ifp, DLT_NULL, sizeof(u_int));
	if ( error != 0 ) {
		goto err_ret3;
	}
#endif
#endif
#ifdef MBUFTRACE
	ifp->if_mowner = malloc(sizeof(struct mowner), M_DEVBUF,
	    M_WAITOK | M_ZERO);
#ifdef T2EX
	if ( ifp->if_mowner == NULL ) {
		error = ENOMEM;
		goto err_ret4;
	}
#endif
	strlcpy(ifp->if_mowner->mo_name, ifp->if_xname,
	    sizeof(ifp->if_mowner->mo_name));
#ifndef T2EX
	MOWNER_ATTACH(ifp->if_mowner);
#else
	error = MOWNER_ATTACH(ifp->if_mowner);
	if ( error != 0 ) {
		goto err_ret5;
	}
#endif
#endif

	return (0);
#ifdef T2EX
#ifdef MBUFTRACE
err_ret5:
	free(ifp->if_mowner, M_DEVBUF);
err_ret4:
	bpfdetach(ifp);
#endif
#if NBPFILTER > 0
err_ret3:
	if_free_sadl(ifp);
#endif
err_ret2:
	if_detach(ifp);
err_ret1:
	if_free(ifp);
err_ret0:
	return error;
#endif
}
Example #22
0
/*
 * Ctor.
 */
static int
vether_clone_create(struct if_clone *ifc, int unit, caddr_t data)
{
	struct vether_softc *sc;
	struct ifnet *ifp;	
	uint32_t randval;
	uint8_t	lla[ETHER_ADDR_LEN];
/*
 * Allocate software context.
 */ 
	sc = malloc(sizeof(struct vether_softc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}
	if_initname(ifp, vether_name, unit);
/*
 * Bind software context.
 */ 	
	VETHER_LOCK_INIT(sc);
	ifp->if_softc = sc;
/*
 * Initialize specific attributes.
 */ 
	ifp->if_init = vether_init;
	ifp->if_ioctl = vether_ioctl;
	ifp->if_start = vether_start;

	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_flags = (IFF_SIMPLEX|IFF_BROADCAST|IFF_MULTICAST|IFF_VETHER);

	ifp->if_capabilities = IFCAP_VLAN_MTU|IFCAP_JUMBO_MTU;
	ifp->if_capenable = IFCAP_VLAN_MTU|IFCAP_JUMBO_MTU;

	ifmedia_init(&sc->sc_ifm, 0, vether_media_change, vether_media_status);
	ifmedia_add(&sc->sc_ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_ifm, IFM_ETHER|IFM_AUTO);

	sc->sc_status = IFM_AVALID;
/*
 * Generate randomized lla.  
 */
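	/* A zero first octet keeps the I/G bit clear, so the address is unicast. */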
	lla[0] = 0x0;
	randval = arc4random();
	memcpy(&lla[1], &randval, sizeof(uint32_t));
	lla[5] = (uint8_t)unit; /* Distinguish instances by interface unit number. */
/*
 * Initialize Ethernet-specific attributes and attach the interface
 * to the link layer.  This also hooks it into the netgraph(4) domain
 * and registers the bpf(4) inspection access point on the generic
 * ifnet(9) interface.
 */
	ether_ifattach(ifp, lla);
	ifp->if_baudrate = 0;

	mtx_lock(&vether_list_mtx);
	LIST_INSERT_HEAD(&vether_list, sc, vether_list);
	mtx_unlock(&vether_list_mtx);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
 
	return (0);
}
Example #23
0
int
dtsec_attach(device_t dev)
{
	struct dtsec_softc *sc;
	int error;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	sc->sc_dev = dev;
	sc->sc_mac_mdio_irq = NO_IRQ;
	sc->sc_eth_id = device_get_unit(dev);


	/* Check if MallocSmart allocator is ready */
	if (XX_MallocSmartInit() != E_OK)
		return (ENXIO);

	XX_TrackInit();

	/* Init locks */
	mtx_init(&sc->sc_lock, device_get_nameunit(dev),
	    "DTSEC Global Lock", MTX_DEF);

	mtx_init(&sc->sc_mii_lock, device_get_nameunit(dev),
	    "DTSEC MII Lock", MTX_DEF);

	/* Init callouts */
	callout_init(&sc->sc_tick_callout, CALLOUT_MPSAFE);

	/* Read configuration */
	if ((error = fman_get_handle(&sc->sc_fmh)) != 0)
		return (error);

	if ((error = fman_get_muram_handle(&sc->sc_muramh)) != 0)
		return (error);

	if ((error = fman_get_bushandle(&sc->sc_fm_base)) != 0)
		return (error);

	/* Configure working mode */
	dtsec_configure_mode(sc);

	/* If we are working in regular mode configure BMAN and QMAN */
	if (sc->sc_mode == DTSEC_MODE_REGULAR) {
		/* Create RX buffer pool */
		error = dtsec_rm_pool_rx_init(sc);
		if (error != 0)
			return (EIO);

		/* Create RX frame queue range */
		error = dtsec_rm_fqr_rx_init(sc);
		if (error != 0)
			return (EIO);

		/* Create frame info pool */
		error = dtsec_rm_fi_pool_init(sc);
		if (error != 0)
			return (EIO);

		/* Create TX frame queue range */
		error = dtsec_rm_fqr_tx_init(sc);
		if (error != 0)
			return (EIO);
	}

	/* Init FMan MAC module. */
	error = dtsec_fm_mac_init(sc, sc->sc_mac_addr);
	if (error != 0) {
		dtsec_detach(dev);
		return (ENXIO);
	}

	/*
	 * XXX: All phys are connected to MDIO interface of the first dTSEC
	 * device (dTSEC0). We have to save handle to the FM_MAC instance of
	 * dTSEC0, which is used later during phy's registers accesses. Another
	 * option would be adding new property to DTS pointing to correct dTSEC
	 * instance, of which FM_MAC handle has to be used for phy's registers
	 * accesses. We did not want to add new properties to DTS, thus this
	 * quite ugly hack.
	 */
	if (sc->sc_eth_id == 0)
		dtsec_mdio_mac_handle = sc->sc_mach;
	if (sc->sc_hidden)
		return (0);

	/* Init FMan TX port */
	error = sc->sc_port_tx_init(sc, device_get_unit(sc->sc_dev));
	if (error != 0) {
		dtsec_detach(dev);
		return (ENXIO);
	}

	/* Init FMan RX port */
	error = sc->sc_port_rx_init(sc, device_get_unit(sc->sc_dev));
	if (error != 0) {
		dtsec_detach(dev);
		return (ENXIO);
	}

	/* Create network interface for upper layers */
	ifp = sc->sc_ifnet = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "if_alloc() failed.\n");
		dtsec_detach(dev);
		return (ENOMEM);
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;	/* TODO: Configure */
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_init = dtsec_if_init;
	ifp->if_start = dtsec_if_start;
	ifp->if_ioctl = dtsec_if_ioctl;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

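	/* Without a valid PHY address the interface is named "dtsec_phy" instead. */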
	if (sc->sc_phy_addr >= 0)
		if_initname(ifp, device_get_name(sc->sc_dev),
		    device_get_unit(sc->sc_dev));
	else
		if_initname(ifp, "dtsec_phy", device_get_unit(sc->sc_dev));

	/* TODO */
#if 0
	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);
#endif
	ifp->if_capabilities = 0; /* TODO: Check */
	ifp->if_capenable = ifp->if_capabilities;

	/* Attach PHY(s) */
	error = mii_attach(sc->sc_dev, &sc->sc_mii_dev, ifp, dtsec_ifmedia_upd,
	    dtsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->sc_phy_addr,
	    MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(sc->sc_dev, "attaching PHYs failed: %d\n", error);
		dtsec_detach(sc->sc_dev);
		return (error);
	}
	sc->sc_mii = device_get_softc(sc->sc_mii_dev);

	/* Attach to stack */
	ether_ifattach(ifp, sc->sc_mac_addr);

	return (0);
}