Example #1
/*
 * Destroy a virtual network stack.
 */
void
vnet_destroy(struct vnet *vnet)
{

	SDT_PROBE2(vnet, functions, vnet_destroy, entry, __LINE__, vnet);
	KASSERT(vnet->vnet_sockcnt == 0,
	    ("%s: vnet still has sockets", __func__));

	VNET_LIST_WLOCK();
	LIST_REMOVE(vnet, vnet_le);
	VNET_LIST_WUNLOCK();

	CURVNET_SET_QUIET(vnet);
	vnet_sysuninit();
	CURVNET_RESTORE();

	/*
	 * Release storage for the virtual network stack instance.
	 */
	free(vnet->vnet_data_mem, M_VNET_DATA);
	vnet->vnet_data_mem = NULL;
	vnet->vnet_data_base = 0;
	vnet->vnet_magic_n = 0xdeadbeef;
	free(vnet, M_VNET);
	SDT_PROBE1(vnet, functions, vnet_destroy, return, __LINE__);
}
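
Every example in this listing shares the same bracket: code that must run with a particular vnet as curvnet wraps the work in CURVNET_SET_QUIET()/CURVNET_RESTORE(). A minimal sketch of that pattern follows; example_run_in_vnet() and do_per_vnet_work() are hypothetical names standing in for any code that touches V_* globals, not kernel functions.

/*
 * Minimal sketch of the shared pattern; the function names here are
 * illustrative only.
 */
static void
example_run_in_vnet(struct vnet *vnet)
{

	CURVNET_SET_QUIET(vnet);	/* switch curvnet, quietly */
	do_per_vnet_work();		/* anything reading/writing V_* globals */
	CURVNET_RESTORE();		/* put the previous curvnet back */
}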
Example #2
/*
 * Lookup and create a clone network interface.
 */
int
if_clone_create(char *name, size_t len, caddr_t params)
{
	struct if_clone *ifc;

	/* Try to find an applicable cloner for this request */
	IF_CLONERS_LOCK();
	LIST_FOREACH(ifc, &V_if_cloners, ifc_list)
		if (ifc->ifc_type == SIMPLE) {
			if (ifc_simple_match(ifc, name))
				break;
		} else {
			if (ifc->ifc_match(ifc, name))
				break;
		}
#ifdef VIMAGE
	if (ifc == NULL && !IS_DEFAULT_VNET(curvnet)) {
		CURVNET_SET_QUIET(vnet0);
		LIST_FOREACH(ifc, &V_if_cloners, ifc_list)
			if (ifc->ifc_type == SIMPLE) {
				if (ifc_simple_match(ifc, name))
					break;
			} else {
				if (ifc->ifc_match(ifc, name))
					break;
			}
		CURVNET_RESTORE();
	}
#endif
	IF_CLONERS_UNLOCK();

	if (ifc == NULL)
		return (EINVAL);

	return (if_clone_createif(ifc, name, len, params));
}
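
A hedged usage sketch: a caller that simply asks for a cloned interface by name. The "epair0" name and the wrapper function are illustrative assumptions, not part of the kernel.

/*
 * Hypothetical caller: request a cloned interface by name.
 */
static int
example_create_clone(void)
{
	char name[IFNAMSIZ] = "epair0";	/* illustrative cloner name */

	return (if_clone_create(name, sizeof(name), NULL));
}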
Example #3
static void
in6_mtutimo(void *rock)
{
	struct timeval atv;
	struct mtuex_arg arg;

	CURVNET_SET_QUIET((struct vnet *) rock);

	rt_foreach_fib_walk(AF_INET6, in6_mtutimo_setwa, in6_mtuexpire, &arg);

	atv.tv_sec = MTUTIMO_DEFAULT;
	atv.tv_usec = 0;
	callout_reset(&V_rtq_mtutimer, tvtohz(&atv), in6_mtutimo, rock);
	CURVNET_RESTORE();
}
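
The handler re-arms itself with the same rock argument; presumably the timer is first armed with the owning vnet as that argument, which is what lets in6_mtutimo() re-establish the context. A short sketch under that assumption (the wrapper name is illustrative):

/*
 * Sketch (assumption): initial arming of the MTU expiry timer, passing
 * the current vnet as the callout argument.
 */
static void
example_start_mtu_timer(void)
{
	struct timeval atv;

	atv.tv_sec = MTUTIMO_DEFAULT;
	atv.tv_usec = 0;
	callout_reset(&V_rtq_mtutimer, tvtohz(&atv), in6_mtutimo, curvnet);
}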
Example #4
void
ieee80211_notify_node_leave(struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ifnet *ifp = vap->iv_ifp;

	CURVNET_SET_QUIET(ifp->if_vnet);
	IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode leave",
	    (ni == vap->iv_bss) ? "bss " : "");

	if (ni == vap->iv_bss) {
		rt_ieee80211msg(ifp, RTM_IEEE80211_DISASSOC, NULL, 0);
		if_link_state_change(ifp, LINK_STATE_DOWN);
	} else {
		/* fire off wireless event station leaving */
		notify_macaddr(ifp, RTM_IEEE80211_LEAVE, ni->ni_macaddr);
	}
	CURVNET_RESTORE();
}
Example #5
void
ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ifnet *ifp = vap->iv_ifp;

	CURVNET_SET_QUIET(ifp->if_vnet);
	IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode join",
	    (ni == vap->iv_bss) ? "bss " : "");

	if (ni == vap->iv_bss) {
		notify_macaddr(ifp, newassoc ?
		    RTM_IEEE80211_ASSOC : RTM_IEEE80211_REASSOC, ni->ni_bssid);
		if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		notify_macaddr(ifp, newassoc ?
		    RTM_IEEE80211_JOIN : RTM_IEEE80211_REJOIN, ni->ni_macaddr);
	}
	CURVNET_RESTORE();
}
Example #6
DB_SHOW_ALL_COMMAND(ifnets, db_show_all_ifnets)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ifnet *ifp;
	u_short idx;

	VNET_FOREACH(vnet_iter) {
		CURVNET_SET_QUIET(vnet_iter);
#ifdef VIMAGE
		db_printf("vnet=%p\n", curvnet);
#endif
		for (idx = 1; idx <= V_if_index; idx++) {
			ifp = V_ifindex_table[idx].ife_ifnet;
			if (ifp == NULL)
				continue;
			db_printf("%20s ifp=%p\n", ifp->if_xname, ifp);
			if (db_pager_quit)
				break;
		}
		CURVNET_RESTORE();
	}
}
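
The ddb command walks every vnet without taking the list lock, which is acceptable inside the debugger; outside of ddb the same walk should be bracketed by VNET_LIST_RLOCK()/VNET_LIST_RUNLOCK(), as pf_proto_register() in example #8 does. A minimal sketch of that locked variant (the function name and the per-vnet work are placeholders):

static void
example_walk_all_vnets(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET_QUIET(vnet_iter);
		/* ... per-vnet work on V_* globals ... */
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
}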
Example #7
/*
 * Allocate a virtual network stack.
 */
struct vnet *
vnet_alloc(void)
{
	struct vnet *vnet;

	SDT_PROBE1(vnet, functions, vnet_alloc, entry, __LINE__);
	vnet = malloc(sizeof(struct vnet), M_VNET, M_WAITOK | M_ZERO);
	vnet->vnet_magic_n = VNET_MAGIC_N;
	vnet->vnet_state = 0;
	SDT_PROBE2(vnet, functions, vnet_alloc, alloc, __LINE__, vnet);

	/*
	 * Allocate storage for virtualized global variables and copy in
	 * initial values from our 'master' copy.
	 */
	vnet->vnet_data_mem = malloc(VNET_SIZE, M_VNET_DATA, M_WAITOK);
	memcpy(vnet->vnet_data_mem, (void *)VNET_START, VNET_BYTES);

	/*
	 * All use of vnet-specific data will immediately subtract VNET_START
	 * from the base memory pointer, so pre-calculate that now to avoid
	 * it on each use.
	 */
	vnet->vnet_data_base = (uintptr_t)vnet->vnet_data_mem - VNET_START;

	/* Initialize / attach vnet module instances. */
	CURVNET_SET_QUIET(vnet);
	vnet_sysinit();
	CURVNET_RESTORE();

	VNET_LIST_WLOCK();
	LIST_INSERT_HEAD(&vnet_head, vnet, vnet_le);
	VNET_LIST_WUNLOCK();

	SDT_PROBE2(vnet, functions, vnet_alloc, return, __LINE__, vnet);
	return (vnet);
}
Example #8
/*
 * The caller must make sure that the new protocol is fully set up and ready to
 * accept requests before it is registered.
 */
int
pf_proto_register(int family, struct protosw *npr)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct domain *dp;
	struct protosw *pr, *fpr;

	/* Sanity checks. */
	if (family == 0)
		return (EPFNOSUPPORT);
	if (npr->pr_type == 0)
		return (EPROTOTYPE);
	if (npr->pr_protocol == 0)
		return (EPROTONOSUPPORT);
	if (npr->pr_usrreqs == NULL)
		return (ENXIO);

	/* Try to find the specified domain based on the family. */
	dp = pffinddomain(family);
	if (dp == NULL)
		return (EPFNOSUPPORT);

	/* Initialize backpointer to struct domain. */
	npr->pr_domain = dp;
	fpr = NULL;

	/*
	 * Protect us against races when two protocol registrations for
	 * the same protocol happen at the same time.
	 */
	mtx_lock(&dom_mtx);

	/* The new protocol must not yet exist. */
	for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
		if ((pr->pr_type == npr->pr_type) &&
		    (pr->pr_protocol == npr->pr_protocol)) {
			mtx_unlock(&dom_mtx);
			return (EEXIST);	/* XXX: Check only protocol? */
		}
		/* While here, remember the first free spacer. */
		if ((fpr == NULL) && (pr->pr_protocol == PROTO_SPACER))
			fpr = pr;
	}

	/* If no free spacer is found we can't add the new protocol. */
	if (fpr == NULL) {
		mtx_unlock(&dom_mtx);
		return (ENOMEM);
	}

	/* Copy the new struct protosw over the spacer. */
	bcopy(npr, fpr, sizeof(*fpr));

	/* Job is done, no more protection required. */
	mtx_unlock(&dom_mtx);

	/* Initialize and activate the protocol. */
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET_QUIET(vnet_iter);
		protosw_init(fpr);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	return (0);
}
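
A hedged sketch of what the leading comment asks of callers: a module that fully initializes its protosw before registering it. The protocol number, flags, and the reuse of the raw-IP handlers below are illustrative assumptions, not a recommendation.

/*
 * Hypothetical caller: every field checked by the sanity tests above
 * (pr_type, pr_protocol, pr_usrreqs) is filled in before registration.
 */
static struct protosw example_protosw = {
	.pr_type =	SOCK_RAW,
	.pr_protocol =	254,		/* illustrative protocol number */
	.pr_flags =	PR_ATOMIC | PR_ADDR,
	.pr_input =	rip_input,	/* assumption: reuse raw-IP handlers */
	.pr_usrreqs =	&rip_usrreqs,
};

static int
example_register(void)
{

	return (pf_proto_register(PF_INET, &example_protosw));
}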
Example #9
/*
 * Process a received Ethernet packet; the packet is in the
 * mbuf chain m with the ethernet header at the front.
 */
static void
ether_input_internal(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh;
	u_short etype;

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}
#ifdef DIAGNOSTIC
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
		m_freem(m);
		return;
	}
#endif
	/*
	 * Do consistency checks to verify assumptions
	 * made by code past this point.
	 */
	if ((m->m_flags & M_PKTHDR) == 0) {
		if_printf(ifp, "discard frame w/o packet header\n");
		ifp->if_ierrors++;
		m_freem(m);
		return;
	}
	if (m->m_len < ETHER_HDR_LEN) {
		/* XXX maybe should pullup? */
		if_printf(ifp, "discard frame w/o leading ethernet "
				"header (len %u pkt len %u)\n",
				m->m_len, m->m_pkthdr.len);
		ifp->if_ierrors++;
		m_freem(m);
		return;
	}
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);
	if (m->m_pkthdr.rcvif == NULL) {
		if_printf(ifp, "discard frame w/o interface pointer\n");
		ifp->if_ierrors++;
		m_freem(m);
		return;
	}
#ifdef DIAGNOSTIC
	if (m->m_pkthdr.rcvif != ifp) {
		if_printf(ifp, "Warning, frame marked as received on %s\n",
			m->m_pkthdr.rcvif->if_xname);
	}
#endif

	CURVNET_SET_QUIET(ifp->if_vnet);

	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
		if (ETHER_IS_BROADCAST(eh->ether_dhost))
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
		ifp->if_imcasts++;
	}

#ifdef MAC
	/*
	 * Tag the mbuf with an appropriate MAC label before any other
	 * consumers can get to it.
	 */
	mac_ifnet_create_mbuf(ifp, m);
#endif

	/*
	 * Give bpf a chance at the packet.
	 */
	ETHER_BPF_MTAP(ifp, m);

	/*
	 * If the CRC is still on the packet, trim it off. We do this once
	 * and once only in case we are re-entered. Nothing else on the
	 * Ethernet receive path expects to see the FCS.
	 */
	if (m->m_flags & M_HASFCS) {
		m_adj(m, -ETHER_CRC_LEN);
		m->m_flags &= ~M_HASFCS;
	}

	if (!(ifp->if_capenable & IFCAP_HWSTATS))
		ifp->if_ibytes += m->m_pkthdr.len;

	/* Allow monitor mode to claim this frame, after stats are updated. */
	if (ifp->if_flags & IFF_MONITOR) {
		m_freem(m);
		CURVNET_RESTORE();
		return;
	}

	/* Handle input from a lagg(4) port */
	if (ifp->if_type == IFT_IEEE8023ADLAG) {
		KASSERT(lagg_input_p != NULL,
		    ("%s: if_lagg not loaded!", __func__));
		m = (*lagg_input_p)(ifp, m);
		if (m != NULL)
			ifp = m->m_pkthdr.rcvif;
		else {
			CURVNET_RESTORE();
			return;
		}
	}

	/*
	 * If the hardware did not process an 802.1Q tag, do this now,
	 * to allow 802.1P priority frames to be passed to the main input
	 * path correctly.
	 * TODO: Deal with Q-in-Q frames, but not arbitrary nesting levels.
	 */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_VLAN) {
		struct ether_vlan_header *evl;

		if (m->m_len < sizeof(*evl) &&
		    (m = m_pullup(m, sizeof(*evl))) == NULL) {
#ifdef DIAGNOSTIC
			if_printf(ifp, "cannot pullup VLAN header\n");
#endif
			ifp->if_ierrors++;
			m_freem(m);
			CURVNET_RESTORE();
			return;
		}

		evl = mtod(m, struct ether_vlan_header *);
		m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
		m->m_flags |= M_VLANTAG;

		bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
		    ETHER_HDR_LEN - ETHER_TYPE_LEN);
		m_adj(m, ETHER_VLAN_ENCAP_LEN);
		eh = mtod(m, struct ether_header *);
	}

	M_SETFIB(m, ifp->if_fib);

	/* Allow ng_ether(4) to claim this frame. */
	if (IFP2AC(ifp)->ac_netgraph != NULL) {
		KASSERT(ng_ether_input_p != NULL,
		    ("%s: ng_ether_input_p is NULL", __func__));
		m->m_flags &= ~M_PROMISC;
		(*ng_ether_input_p)(ifp, &m);
		if (m == NULL) {
			CURVNET_RESTORE();
			return;
		}
		eh = mtod(m, struct ether_header *);
	}
Example #10
static void
ue_attach_post_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	struct ifnet *ifp;
	int error;
	char num[14];			/* sufficient for 32 bits */

	/* first call driver's post attach routine */
	ue->ue_methods->ue_attach_post(ue);

	UE_UNLOCK(ue);

	ue->ue_unit = alloc_unr(ueunit);
	usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_mtx, 0);
	sysctl_ctx_init(&ue->ue_sysctl_ctx);

	error = 0;
	CURVNET_SET_QUIET(vnet0);
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(ue->ue_dev, "could not allocate ifnet\n");
		goto fail;
	}

	ifp->if_softc = ue;
	if_initname(ifp, "ue", ue->ue_unit);
	if (ue->ue_methods->ue_attach_post_sub != NULL) {
		ue->ue_ifp = ifp;
		error = ue->ue_methods->ue_attach_post_sub(ue);
	} else {
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		if (ue->ue_methods->ue_ioctl != NULL)
			ifp->if_ioctl = ue->ue_methods->ue_ioctl;
		else
			ifp->if_ioctl = uether_ioctl;
		ifp->if_start = ue_start;
		ifp->if_init = ue_init;
		IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
		ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
		IFQ_SET_READY(&ifp->if_snd);
		ue->ue_ifp = ifp;

		if (ue->ue_methods->ue_mii_upd != NULL &&
		    ue->ue_methods->ue_mii_sts != NULL) {
			/* device_xxx() depends on this */
			mtx_lock(&Giant);
			error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
			    ue_ifmedia_upd, ue->ue_methods->ue_mii_sts,
			    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
			mtx_unlock(&Giant);
		}
	}

	if (error) {
		device_printf(ue->ue_dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_printf(ifp, "<USB Ethernet> on %s\n", device_get_nameunit(ue->ue_dev));
	ether_ifattach(ifp, ue->ue_eaddr);
	/* Tell upper layer we support VLAN oversized frames. */
	if (ifp->if_capabilities & IFCAP_VLAN_MTU)
		ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	CURVNET_RESTORE();

	snprintf(num, sizeof(num), "%u", ue->ue_unit);
	ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx,
	    &SYSCTL_NODE_CHILDREN(_net, ue),
	    OID_AUTO, num, CTLFLAG_RD, NULL, "");
	SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx,
	    SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO,
	    "%parent", CTLTYPE_STRING | CTLFLAG_RD, ue, 0,
	    ue_sysctl_parent, "A", "parent device");

	UE_LOCK(ue);
	return;

fail:
	CURVNET_RESTORE();
	free_unr(ueunit, ue->ue_unit);
	if (ue->ue_ifp != NULL) {
		if_free(ue->ue_ifp);
		ue->ue_ifp = NULL;
	}
	UE_LOCK(ue);
	return;
}
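
The attach task runs from the USB process with no vnet context of its own, which is why the ifnet is created under vnet0. A hedged sketch of the symmetric teardown under that assumption; this illustrates the pattern and is not the actual uether detach code.

/*
 * Illustrative teardown: switch into the vnet the ifnet was attached to
 * before detaching and freeing it.
 */
static void
example_ue_teardown(struct usb_ether *ue)
{
	struct ifnet *ifp = ue->ue_ifp;

	if (ifp != NULL) {
		CURVNET_SET_QUIET(ifp->if_vnet);
		ether_ifdetach(ifp);
		if_free(ifp);
		CURVNET_RESTORE();
		ue->ue_ifp = NULL;
	}
}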