static void bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach) { /* NB: identify vap's by if_init */ if (dlt == DLT_IEEE802_11_RADIO && ifp->if_init == ieee80211_init) { struct ieee80211vap *vap = ifp->if_softc; /* * Track bpf radiotap listener state. We mark the vap * to indicate if any listener is present and the com * to indicate if any listener exists on any associated * vap. This flag is used by drivers to prepare radiotap * state only when needed. */ if (attach) { ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF); if (vap->iv_opmode == IEEE80211_M_MONITOR) atomic_add_int(&vap->iv_ic->ic_montaps, 1); } else if (!bpf_peers_present(vap->iv_rawbpf)) { ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF); if (vap->iv_opmode == IEEE80211_M_MONITOR) atomic_subtract_int(&vap->iv_ic->ic_montaps, 1); } } }
static int discoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { u_int32_t af; M_ASSERTPKTHDR(m); /* BPF writes need to be handled specially. */ if (dst->sa_family == AF_UNSPEC) bcopy(dst->sa_data, &af, sizeof(af)); else af = dst->sa_family; if (bpf_peers_present(ifp->if_bpf)) bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m); m->m_pkthdr.rcvif = ifp; ifp->if_opackets++; ifp->if_obytes += m->m_pkthdr.len; m_freem(m); return (0); }
void ipsec_bpf(struct mbuf *m, struct secasvar *sav, int af, int flags) { int mflags; struct enchdr hdr; KASSERT(encif != NULL, ("%s: encif is null", __func__)); KASSERT(flags & (ENC_IN|ENC_OUT), ("%s: invalid flags: %04x", __func__, flags)); if ((encif->if_drv_flags & IFF_DRV_RUNNING) == 0) return; if (flags & ENC_IN) { if ((flags & ipsec_bpf_mask_in) == 0) return; } else { if ((flags & ipsec_bpf_mask_out) == 0) return; } if (bpf_peers_present(encif->if_bpf)) { mflags = 0; hdr.spi = 0; if (!sav) { struct m_tag *mtag; mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); if (mtag != NULL) { struct tdb_ident *tdbi; tdbi = (struct tdb_ident *) (mtag + 1); if (tdbi->alg_enc != SADB_EALG_NONE) mflags |= M_CONF; if (tdbi->alg_auth != SADB_AALG_NONE) mflags |= M_AUTH; hdr.spi = tdbi->spi; } } else { if (sav->alg_enc != SADB_EALG_NONE) mflags |= M_CONF; if (sav->alg_auth != SADB_AALG_NONE) mflags |= M_AUTH; hdr.spi = sav->spi; } /* * We need to prepend the address family as a four byte * field. Cons up a dummy header to pacify bpf. This * is safe because bpf will only read from the mbuf * (i.e., it won't try to free it or keep a pointer a * to it). */ hdr.af = af; /* hdr.spi already set above */ hdr.flags = mflags; bpf_mtap2(encif->if_bpf, &hdr, sizeof(hdr), m); } }
/*
 * One helper hook function is used by any hook points.
 * + from hhook_type we can determine the packet direction:
 *   HHOOK_TYPE_IPSEC_IN or HHOOK_TYPE_IPSEC_OUT;
 * + from hhook_id we can determine address family: AF_INET or AF_INET6;
 * + udata contains pointer to enc_softc;
 * + ctx_data contains pointer to struct ipsec_ctx_data.
 *
 * Returns 0 to continue normal processing, EPFNOSUPPORT when the hook
 * point does not match the packet's address family, EACCES when a pfil
 * filter consumed or rejected the packet (in which case *ctx->mp is set
 * to NULL), or EINVAL for an unknown hook type.
 */
static int
enc_hhook(int32_t hhook_type, int32_t hhook_id, void *udata, void *ctx_data,
    void *hdata, struct osd *hosd)
{
	struct enchdr hdr;
	struct ipsec_ctx_data *ctx;
	struct enc_softc *sc;
	struct ifnet *ifp, *rcvif;
	struct pfil_head *ph;
	int pdir;

	sc = (struct enc_softc *)udata;
	ifp = sc->sc_ifp;
	/* Nothing to do when the enc(4) interface is administratively down. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (0);

	ctx = (struct ipsec_ctx_data *)ctx_data;
	/* XXX: wrong hook point was used by caller? */
	if (ctx->af != hhook_id)
		return (EPFNOSUPPORT);

	/*
	 * Tap the packet to bpf if the per-VNET bpf mask for this direction
	 * selects the current processing stage (ctx->enc) and a listener is
	 * attached.  The prepended enchdr describes the SA in effect.
	 */
	if (((hhook_type == HHOOK_TYPE_IPSEC_IN &&
	    (ctx->enc & V_bpf_mask_in) != 0) ||
	    (hhook_type == HHOOK_TYPE_IPSEC_OUT &&
	    (ctx->enc & V_bpf_mask_out) != 0)) &&
	    bpf_peers_present(ifp->if_bpf) != 0) {
		hdr.af = ctx->af;
		hdr.spi = ctx->sav->spi;
		hdr.flags = 0;
		if (ctx->sav->alg_enc != SADB_EALG_NONE)
			hdr.flags |= M_CONF;
		if (ctx->sav->alg_auth != SADB_AALG_NONE)
			hdr.flags |= M_AUTH;
		bpf_mtap2(ifp->if_bpf, &hdr, sizeof(hdr), *ctx->mp);
	}

	switch (hhook_type) {
	case HHOOK_TYPE_IPSEC_IN:
		if (ctx->enc == IPSEC_ENC_BEFORE) {
			/* Do accounting only once */
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			if_inc_counter(ifp, IFCOUNTER_IBYTES,
			    (*ctx->mp)->m_pkthdr.len);
		}
		if ((ctx->enc & V_filter_mask_in) == 0)
			return (0); /* skip pfil processing */
		pdir = PFIL_IN;
		break;
	case HHOOK_TYPE_IPSEC_OUT:
		if (ctx->enc == IPSEC_ENC_BEFORE) {
			/* Do accounting only once */
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			if_inc_counter(ifp, IFCOUNTER_OBYTES,
			    (*ctx->mp)->m_pkthdr.len);
		}
		if ((ctx->enc & V_filter_mask_out) == 0)
			return (0); /* skip pfil processing */
		pdir = PFIL_OUT;
		break;
	default:
		return (EINVAL);
	}

	/* Select the pfil head for the packet's address family. */
	switch (hhook_id) {
#ifdef INET
	case AF_INET:
		ph = &V_inet_pfil_hook;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ph = &V_inet6_pfil_hook;
		break;
#endif
	default:
		ph = NULL;
	}
	if (ph == NULL || !PFIL_HOOKED(ph))
		return (0);

	/* Make the packet look like it was received on enc(4). */
	rcvif = (*ctx->mp)->m_pkthdr.rcvif;
	(*ctx->mp)->m_pkthdr.rcvif = ifp;
	if (pfil_run_hooks(ph, ctx->mp, ifp, pdir, NULL) != 0 ||
	    *ctx->mp == NULL) {
		*ctx->mp = NULL; /* consumed by filter */
		return (EACCES);
	}
	/* Restore the real receive interface before resuming. */
	(*ctx->mp)->m_pkthdr.rcvif = rcvif;
	return (0);
}
/*
 * Decapsulate. Does the real work and is called from gre_input()
 * (above). Returns an mbuf back if packet is not yet processed,
 * and NULL if it needs no further processing. proto is the protocol
 * number of the "calling" foo_input() routine.
 */
static struct mbuf *
gre_input2(struct mbuf *m, int hlen, u_char proto)
{
	struct greip *gip;
	int isr;
	struct gre_softc *sc;
	u_int16_t flags;
	u_int32_t af;

	if ((sc = gre_lookup(m, proto)) == NULL) {
		/* No matching tunnel or tunnel is down. */
		return (m);
	}

	/* Make sure the fixed GRE/IP header is contiguous. */
	if (m->m_len < sizeof(*gip)) {
		m = m_pullup(m, sizeof(*gip));
		if (m == NULL)
			return (NULL);
	}
	gip = mtod(m, struct greip *);

	GRE2IFP(sc)->if_ipackets++;
	GRE2IFP(sc)->if_ibytes += m->m_pkthdr.len;

	switch (proto) {
	case IPPROTO_GRE:
		hlen += sizeof(struct gre_h);

		/* process GRE flags as packet can be of variable len */
		flags = ntohs(gip->gi_flags);

		/* Checksum & Offset are present */
		if ((flags & GRE_CP) | (flags & GRE_RP))
			hlen += 4;
		/* We don't support routing fields (variable length) */
		if (flags & GRE_RP)
			return (m);
		if (flags & GRE_KP)
			hlen += 4;	/* Key field */
		if (flags & GRE_SP)
			hlen += 4;	/* Sequence number field */

		switch (ntohs(gip->gi_ptype)) { /* ethertypes */
		case WCCP_PROTOCOL_TYPE:
			/* WCCPv2 carries an extra 4-byte redirect header. */
			if (sc->wccp_ver == WCCP_V2)
				hlen += 4;
			/* FALLTHROUGH */
		case ETHERTYPE_IP:	/* shouldn't need a schednetisr(), */
			isr = NETISR_IP;/* as we are in ip_input */
			af = AF_INET;
			break;
#ifdef INET6
		case ETHERTYPE_IPV6:
			isr = NETISR_IPV6;
			af = AF_INET6;
			break;
#endif
#ifdef NETATALK
		case ETHERTYPE_ATALK:
			isr = NETISR_ATALK1;
			af = AF_APPLETALK;
			break;
#endif
		default:
			/* Others not yet supported. */
			return (m);
		}
		break;
	default:
		/* Others not yet supported. */
		return (m);
	}

	/* Header claims more bytes than the packet actually has. */
	if (hlen > m->m_pkthdr.len) {
		m_freem(m);
		return (NULL);
	}
	/* Unlike NetBSD, in FreeBSD m_adj() adjusts m->m_pkthdr.len as well */
	m_adj(m, hlen);

	/* Tap the decapsulated payload, prefixed with its address family. */
	if (bpf_peers_present(GRE2IFP(sc)->if_bpf)) {
		bpf_mtap2(GRE2IFP(sc)->if_bpf, &af, sizeof(af), m);
	}

	/* In monitor mode we only observe; do not reinject the packet. */
	if ((GRE2IFP(sc)->if_flags & IFF_MONITOR) != 0) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = GRE2IFP(sc);

	netisr_queue(isr, m);

	/* Packet is done, no further processing needed. */
	return (NULL);
}
/*
 * if_simloop()
 *
 * This function is to support software emulation of hardware loopback,
 * i.e., for interfaces with the IFF_SIMPLEX attribute. Since they can't
 * hear their own broadcasts, we create a copy of the packet that we
 * would normally receive via a hardware loopback.
 *
 * This function expects the packet to include the media header of length hlen.
 *
 * Returns 0 on success or EAFNOSUPPORT for an unhandled address family
 * (in which case the mbuf is freed).  The mbuf is always consumed.
 */
int
if_simloop(struct ifnet *ifp, struct mbuf *m, int af, int hlen)
{
	int isr;

	M_ASSERTPKTHDR(m);
	/* Drop tags that must not survive a loopback "reception". */
	m_tag_delete_nonpersistent(m);
	m->m_pkthdr.rcvif = ifp;

#if 0
	/*
	 * Let BPF see incoming packet in the following manner:
	 *  - Emulated packet loopback for a simplex interface
	 *    (net/if_ethersubr.c)
	 *	-> passes it to ifp's BPF
	 *  - IPv4/v6 multicast packet loopback (netinet(6)/ip(6)_output.c)
	 *	-> not passes it to any BPF
	 *  - Normal packet loopback from myself to myself (net/if_loop.c)
	 *	-> passes to lo0's BPF (even in case of IPv6, where ifp!=lo0)
	 */
	if (hlen > 0) {
		if (bpf_peers_present(ifp->if_bpf)) {
			bpf_mtap(ifp->if_bpf, m);
		}
	} else {
		if (bpf_peers_present(V_loif->if_bpf)) {
			if ((m->m_flags & M_MCAST) == 0 || V_loif == ifp) {
				/* XXX beware sizeof(af) != 4 */
				u_int32_t af1 = af;

				/*
				 * We need to prepend the address family.
				 */
				bpf_mtap2(V_loif->if_bpf, &af1, sizeof(af1), m);
			}
		}
	}
#endif

	/* Strip away media header */
	if (hlen > 0) {
		m_adj(m, hlen);
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * Some archs do not like unaligned data, so
		 * we move data down in the first mbuf.
		 */
		if (mtod(m, vm_offset_t) & 3) {
			KASSERT(hlen >= 3, ("if_simloop: hlen too small"));
			bcopy(m->m_data,
			    (char *)(mtod(m, vm_offset_t)
				- (mtod(m, vm_offset_t) & 3)),
			    m->m_len);
			m->m_data -= (mtod(m, vm_offset_t) & 3);
		}
#endif
	}

	/* Deliver to upper layer protocol */
	switch (af) {
#ifdef INET
	case AF_INET:
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/* Mark looped-back IPv6 packets so ip6_input can tell. */
		m->m_flags |= M_LOOP;
		isr = NETISR_IPV6;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		isr = NETISR_IPX;
		break;
#endif
#ifdef NETATALK
	case AF_APPLETALK:
		isr = NETISR_ATALK2;
		break;
#endif
	default:
		printf("if_simloop: can't handle af=%d\n", af);
		m_freem(m);
		return (EAFNOSUPPORT);
	}
	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;
	netisr_queue(isr, m);	/* mbuf is free'd on failure. */
	return (0);
}
/*
 * Output routine for firewire (IEEE 1394) interfaces: resolve the
 * link-layer destination, tap bpf, then encapsulate and transmit.
 *
 * NOTE(review): this function is truncated in the visible chunk — the
 * fragmentation path (the final "else" branch) continues beyond view.
 */
static int
firewire_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct fw_com *fc = IFP2FWC(ifp);
	int error, type;
	struct m_tag *mtag;
	union fw_encap *enc;
	struct fw_hwaddr *destfw;
	uint8_t speed;
	uint16_t psize, fsize, dsize;
	struct mbuf *mtail;
	int unicast, dgl, foff;
	static int next_dgl;
#if defined(INET) || defined(INET6)
	struct llentry *lle;
#endif

#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error)
		goto bad;
#endif

	/* Refuse to transmit unless the interface is up and running. */
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * For unicast, we make a tag to store the lladdr of the
	 * destination. This might not be the first time we have seen
	 * the packet (for instance, the arp code might be trying to
	 * re-send it after receiving an arp reply) so we only
	 * allocate a tag if there isn't one there already. For
	 * multicast, we will eventually use a different tag to store
	 * the channel number.
	 */
	unicast = !(m->m_flags & (M_BCAST | M_MCAST));
	if (unicast) {
		mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, NULL);
		if (!mtag) {
			mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR,
			    sizeof (struct fw_hwaddr), M_NOWAIT);
			if (!mtag) {
				error = ENOMEM;
				goto bad;
			}
			m_tag_prepend(m, mtag);
		}
		destfw = (struct fw_hwaddr *)(mtag + 1);
	} else {
		destfw = 0;
	}

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		/*
		 * Only bother with arp for unicast. Allocation of
		 * channels etc. for firewire is quite different and
		 * doesn't fit into the arp model.
		 */
		if (unicast) {
			error = arpresolve(ifp, ro ? ro->ro_rt : NULL, m, dst,
			    (u_char *) destfw, &lle);
			if (error)
				/* EWOULDBLOCK: arp holds the mbuf for retry. */
				return (error == EWOULDBLOCK ? 0 : error);
		}
		type = ETHERTYPE_IP;
		break;

	case AF_ARP:
	{
		struct arphdr *ah;
		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_IEEE1394);
		type = ETHERTYPE_ARP;
		if (unicast)
			*destfw = *(struct fw_hwaddr *) ar_tha(ah);

		/*
		 * The standard arp code leaves a hole for the target
		 * hardware address which we need to close up.
		 */
		bcopy(ar_tpa(ah), ar_tha(ah), ah->ar_pln);
		m_adj(m, -ah->ar_hln);
		break;
	}
#endif

#ifdef INET6
	case AF_INET6:
		if (unicast) {
			error = nd6_storelladdr(fc->fc_ifp, m, dst,
			    (u_char *) destfw, &lle);
			if (error)
				return (error);
		}
		type = ETHERTYPE_IPV6;
		break;
#endif

	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Let BPF tap off a copy before we encapsulate.
	 */
	if (bpf_peers_present(ifp->if_bpf)) {
		struct fw_bpfhdr h;
		if (unicast)
			bcopy(destfw, h.firewire_dhost, 8);
		else
			bcopy(&firewire_broadcastaddr, h.firewire_dhost, 8);
		bcopy(&fc->fc_hwaddr, h.firewire_shost, 8);
		h.firewire_type = htons(type);
		bpf_mtap2(ifp->if_bpf, &h, sizeof(h), m);
	}

	/*
	 * Punt on MCAP for now and send all multicast packets on the
	 * broadcast channel.
	 */
	if (m->m_flags & M_MCAST)
		m->m_flags |= M_BCAST;

	/*
	 * Figure out what speed to use and what the largest supported
	 * packet size is. For unicast, this is the minimum of what we
	 * can speak and what they can hear. For broadcast, lets be
	 * conservative and use S100. We could possibly improve that
	 * by examining the bus manager's speed map or similar. We
	 * also reduce the packet size for broadcast to account for
	 * the GASP header.
	 */
	if (unicast) {
		speed = min(fc->fc_speed, destfw->sspd);
		psize = min(512 << speed, 2 << destfw->sender_max_rec);
	} else {
		speed = 0;
		psize = 512 - 2*sizeof(uint32_t);
	}

	/*
	 * Next, we encapsulate, possibly fragmenting the original
	 * datagram if it won't fit into a single packet.
	 */
	if (m->m_pkthdr.len <= psize - sizeof(uint32_t)) {
		/*
		 * No fragmentation is necessary.
		 */
		M_PREPEND(m, sizeof(uint32_t), M_NOWAIT);
		if (!m) {
			error = ENOBUFS;
			goto bad;
		}
		enc = mtod(m, union fw_encap *);
		enc->unfrag.ether_type = type;
		enc->unfrag.lf = FW_ENCAP_UNFRAG;
		enc->unfrag.reserved = 0;

		/*
		 * Byte swap the encapsulation header manually.
		 */
		enc->ul[0] = htonl(enc->ul[0]);

		error = (ifp->if_transmit)(ifp, m);
		return (error);
	} else {
/*
 * Tap a USB transfer to bpf listeners on the bus's usbpf interface.
 *
 * Builds a contiguous buffer containing a usbpf_pkthdr followed by one
 * usbpf_framehdr (plus optional, zero-padded frame data) per USB frame,
 * then hands it to bpf_tap().  'type' distinguishes submit from done
 * taps and controls which direction's frame data is copied.
 */
void
usbpf_xfertap(struct usb_xfer *xfer, int type)
{
	struct usb_bus *bus;
	struct usbpf_pkthdr *up;
	struct usbpf_framehdr *uf;
	usb_frlength_t offset;
	uint32_t totlen;
	uint32_t frame;
	uint32_t temp;
	uint32_t nframes;
	uint32_t x;
	uint8_t *buf;
	uint8_t *ptr;

	bus = xfer->xroot->bus;

	/* sanity checks */
	if (bus->ifp == NULL || bus->ifp->if_bpf == NULL)
		return;
	if (!bpf_peers_present(bus->ifp->if_bpf))
		return;

	totlen = usbpf_xfer_precompute_size(xfer, type);

	/* On submit tap all requested frames; on done only the actual ones. */
	if (type == USBPF_XFERTAP_SUBMIT)
		nframes = xfer->nframes;
	else
		nframes = xfer->aframes;

	/*
	 * XXX TODO XXX
	 *
	 * When BPF supports it we could pass a fragmented array of
	 * buffers avoiding the data copy operation here.
	 */
	buf = ptr = malloc(totlen, M_TEMP, M_NOWAIT);
	if (buf == NULL) {
		device_printf(bus->parent, "usbpf: Out of memory\n");
		return;
	}

	up = (struct usbpf_pkthdr *)ptr;
	ptr += USBPF_HDR_LEN;

	/* fill out header (multi-byte fields are little-endian on the wire) */
	temp = device_get_unit(bus->bdev);
	up->up_totlen = htole32(totlen);
	up->up_busunit = htole32(temp);
	up->up_address = xfer->xroot->udev->device_index;
	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
		up->up_mode = USBPF_MODE_DEVICE;
	else
		up->up_mode = USBPF_MODE_HOST;
	up->up_type = type;
	up->up_xfertype = xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE;
	temp = usbpf_aggregate_xferflags(&xfer->flags);
	up->up_flags = htole32(temp);
	temp = usbpf_aggregate_status(&xfer->flags_int);
	up->up_status = htole32(temp);
	temp = xfer->error;
	up->up_error = htole32(temp);
	temp = xfer->interval;
	up->up_interval = htole32(temp);
	up->up_frames = htole32(nframes);
	temp = xfer->max_packet_size;
	up->up_packet_size = htole32(temp);
	temp = xfer->max_packet_count;
	up->up_packet_count = htole32(temp);
	temp = xfer->endpointno;
	up->up_endpoint = htole32(temp);
	up->up_speed = xfer->xroot->udev->speed;

	/* clear reserved area */
	memset(up->up_reserved, 0, sizeof(up->up_reserved));

	/* init offset and frame */
	offset = 0;
	frame = 0;

	/* iterate all the USB frames and copy data, if any */
	for (x = 0; x != nframes; x++) {
		uint32_t length;
		int isread;

		/* get length */
		length = xfer->frlengths[x];

		/* get frame header pointer */
		uf = (struct usbpf_framehdr *)ptr;
		ptr += USBPF_FRAME_HDR_LEN;

		/* fill out packet header */
		uf->length = htole32(length);
		uf->flags = 0;

		/* get information about data read/write */
		isread = usbpf_xfer_frame_is_read(xfer, x);

		/*
		 * Check if we need to copy any data: read-direction data
		 * only exists on completion, write-direction data only on
		 * submission; otherwise the frame carries no payload.
		 */
		if (isread) {
			if (type == USBPF_XFERTAP_SUBMIT)
				length = 0;
			else {
				uf->flags |= htole32(
				    USBPF_FRAMEFLAG_DATA_FOLLOWS);
			}
		} else {
			if (type != USBPF_XFERTAP_SUBMIT)
				length = 0;
			else {
				uf->flags |= htole32(
				    USBPF_FRAMEFLAG_DATA_FOLLOWS);
			}
		}

		/* check if data is read direction */
		if (isread)
			uf->flags |= htole32(USBPF_FRAMEFLAG_READ);

		/* copy USB data, if any */
		if (length != 0) {
			/* copy data */
			usbd_copy_out(&xfer->frbuffers[frame], offset,
			    ptr, length);

			/* align length */
			temp = USBPF_FRAME_ALIGN(length);

			/* zero pad */
			if (temp != length)
				memset(ptr + length, 0, temp - length);

			ptr += temp;
		}

		/*
		 * Isochronous transfers use one buffer with an advancing
		 * offset; other types use one buffer per frame.
		 */
		if (xfer->flags_int.isochronous_xfr) {
			offset += usbd_xfer_old_frame_length(xfer, x);
		} else {
			frame ++;
		}
	}

	bpf_tap(bus->ifp->if_bpf, buf, totlen);

	free(buf, M_TEMP);
}