/* Enqueue a packet 'm' to a queue 'q' and add timestamp to that packet. * Return 1 when unable to add timestamp, otherwise return 0 */ static int codel_enqueue(struct fq_codel_flow *q, struct mbuf *m, struct fq_codel_si *si) { uint64_t len; len = m->m_pkthdr.len; /* finding maximum packet size */ if (len > q->cst.maxpkt_size) q->cst.maxpkt_size = len; /* Add timestamp to mbuf as MTAG */ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, sizeof(aqm_time_t), M_NOWAIT); if (mtag == NULL) { m_freem(m); goto drop; } *(aqm_time_t *)(mtag + 1) = AQM_UNOW; m_tag_prepend(m, mtag); mq_append(&q->mq, m); fq_update_stats(q, si, len, 0); return 0; drop: fq_update_stats(q, si, len, 1); m_freem(m); return 1; }
/*
 * Add packet 'm' to class queue 'q', recording its enqueue time in an
 * MTAG_CODEL tag for later sojourn-time computation.
 *
 * Returns 0 on success; -1 (and the packet is freed) when the queue is
 * over its limit or the timestamp tag cannot be allocated.
 */
int
codel_addq(struct codel *c, class_queue_t *q, struct mbuf *m)
{
	struct m_tag *tag;

	/* Queue at its limit: count the overlimit drop and discard. */
	if (qlen(q) >= qlimit(q)) {
		c->drop_overlimit++;
		m_freem(m);
		return (-1);
	}

	/* Reuse an existing timestamp tag if the packet already has one. */
	tag = m_tag_locate(m, MTAG_CODEL, 0, NULL);
	if (tag == NULL)
		tag = m_tag_alloc(MTAG_CODEL, 0, sizeof(uint64_t), M_NOWAIT);
	if (tag == NULL) {
		m_freem(m);
		return (-1);
	}
	*(uint64_t *)(tag + 1) = read_machclk();
	m_tag_prepend(m, tag);

	_addq(q, m);
	return (0);
}
/**
 * Called to deliver a frame to either the host, the wire or both.
 *
 * @returns VINF_SUCCESS, or VERR_NO_MEMORY when an mbuf chain or mbuf
 *          tag could not be allocated.
 *
 * Fix: every early error return previously skipped VBOXCURVNET_RESTORE()
 * after VBOXCURVNET_SET(), leaving the current-VNET context set; all exits
 * now flow through a single restore point.  Unused locals removed.
 */
int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, void *pvIfData, PINTNETSG pSG, uint32_t fDst)
{
    NOREF(pvIfData);

    struct ifnet *ifp;
    struct mbuf *m;
    struct m_tag *mtag;
    int rc = VINF_SUCCESS;

    ifp = ASMAtomicUoReadPtrT(&pThis->u.s.ifp, struct ifnet *);
    VBOXCURVNET_SET(ifp->if_vnet);

    if (fDst & INTNETTRUNKDIR_WIRE)
    {
        m = vboxNetFltFreeBSDSGMBufFromSG(pThis, pSG);
        if (m == NULL)
        {
            rc = VERR_NO_MEMORY;
            goto done;
        }
        m = m_pullup(m, ETHER_HDR_LEN);
        if (m == NULL)
        {
            rc = VERR_NO_MEMORY;
            goto done;
        }
        m->m_flags |= M_PKTHDR;
        ether_output_frame(ifp, m);
    }

    if (fDst & INTNETTRUNKDIR_HOST)
    {
        m = vboxNetFltFreeBSDSGMBufFromSG(pThis, pSG);
        if (m == NULL)
        {
            rc = VERR_NO_MEMORY;
            goto done;
        }
        m = m_pullup(m, ETHER_HDR_LEN);
        if (m == NULL)
        {
            rc = VERR_NO_MEMORY;
            goto done;
        }
        /*
         * Delivering packets to the host will be captured by the
         * input hook. Tag the packet with a mbuf tag so that we
         * can skip re-delivery of the packet to the guest during
         * input hook processing.
         */
        mtag = m_tag_alloc(MTAG_VBOX, PACKET_TAG_VBOX, 0, M_NOWAIT);
        if (mtag == NULL)
        {
            m_freem(m);
            rc = VERR_NO_MEMORY;
            goto done;
        }
        /*
         * NOTE(review): m_tag_init() resets the mbuf's tag list before the
         * prepend; kept as in the original — confirm no tags from the SG
         * conversion need to survive here.
         */
        m_tag_init(m);
        m_tag_prepend(m, mtag);
        m->m_flags |= M_PKTHDR;
        m->m_pkthdr.rcvif = ifp;
        ifp->if_input(ifp, m);
    }

done:
    VBOXCURVNET_RESTORE();
    return rc;
}
/*
 * Re-inject a filtered packet into the inbound IP path.
 *
 * When injected without a filter reference and without a receive
 * interface, the packet is attributed to loopback and its IPv4 header
 * checksum is recomputed.  When a filter reference is supplied, it is
 * recorded in an mbuf tag — presumably so that filter is skipped when
 * the packet traverses the filter list again (verify against callers).
 *
 * Returns 0 on success or an errno on failure.
 */
errno_t
ipf_inject_input(mbuf_t data, ipfilter_t filter_ref)
{
	struct mbuf *m = (struct mbuf *)data;
	struct ip *ip = mtod(m, struct ip *);
	struct m_tag *mtag;
	protocol_family_t proto;
	u_int8_t vers;
	int hlen;

	vers = IP_VHL_V(ip->ip_vhl);
	if (vers == 4)
		proto = PF_INET;
	else if (vers == 6)
		proto = PF_INET6;
	else
		return ENOTSUP;

	if (filter_ref == 0 && m->m_pkthdr.rcvif == 0) {
		m->m_pkthdr.rcvif = lo_ifp;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.csum_flags = 0;
		if (vers == 4) {
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
			ip->ip_sum = 0;
			ip->ip_sum = in_cksum(m, hlen);
		}
	}

	if (filter_ref != 0) {
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID,
		    KERNEL_TAG_TYPE_IPFILT, sizeof (ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL)
			return ENOMEM;
		*(ipfilter_t *)(mtag + 1) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	return proto_inject(proto, data);
}
static errno_t ipf_injectv4_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) { struct route ro; struct ip *ip; struct mbuf *m = (struct mbuf*)data; errno_t error = 0; struct m_tag *mtag = NULL; struct ip_moptions *imo = NULL; struct ip_out_args ipoa = { IFSCOPE_NONE, { 0 }, 0, 0 }; /* Make the IP header contiguous in the mbuf */ if ((size_t)m->m_len < sizeof (struct ip)) { m = m_pullup(m, sizeof (struct ip)); if (m == NULL) return (ENOMEM); } ip = (struct ip *)m_mtod(m); if (filter_ref != 0) { mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT, sizeof (ipfilter_t), M_NOWAIT, m); if (mtag == NULL) { m_freem(m); return (ENOMEM); } *(ipfilter_t *)(mtag + 1) = filter_ref; m_tag_prepend(m, mtag); } if (options != NULL && (options->ippo_flags & IPPOF_MCAST_OPTS) && (imo = ip_allocmoptions(M_DONTWAIT)) != NULL) { imo->imo_multicast_ifp = options->ippo_mcast_ifnet; imo->imo_multicast_ttl = options->ippo_mcast_ttl; imo->imo_multicast_loop = options->ippo_mcast_loop; } if (options != NULL) { if (options->ippo_flags & IPPOF_SELECT_SRCIF) ipoa.ipoa_flags |= IPOAF_SELECT_SRCIF; if (options->ippo_flags & IPPOF_BOUND_IF) { ipoa.ipoa_flags |= IPOAF_BOUND_IF; ipoa.ipoa_boundif = options->ippo_flags >> IPPOF_SHIFT_IFSCOPE; } if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR) ipoa.ipoa_flags |= IPOAF_NO_CELLULAR; if (options->ippo_flags & IPPOF_BOUND_SRCADDR) ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR; }
/*
 * Return the pf metadata area attached to 'm', attaching a fresh
 * zero-filled PACKET_TAG_PF tag when the mbuf does not carry one yet.
 * Returns NULL only when tag allocation fails.
 */
struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *t;

	t = m_tag_find(m, PACKET_TAG_PF, NULL);
	if (t != NULL)
		return ((struct pf_mtag *)(t + 1));

	/* Not tagged yet: allocate, zero and attach one. */
	t = m_tag_get(PACKET_TAG_PF, sizeof(struct pf_mtag), M_NOWAIT);
	if (t == NULL)
		return (NULL);
	bzero(t + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, t);
	return ((struct pf_mtag *)(t + 1));
}
/*
 * Attach an encap tag carrying 'ep->arg' to mbuf 'm' so the protocol
 * input routine can recover the encaptab argument later.  Allocation
 * uses M_WAITOK; the NULL check is kept defensively.
 */
static void
encap_fillarg(struct mbuf *m, const struct encaptab *ep)
{
	struct m_tag *mt;

	mt = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_ENCAP,
	    sizeof(struct encaptabtag), M_WAITOK, m);
	if (mt == NULL)
		return;

	((struct encaptabtag *)(mt + 1))->arg = ep->arg;
	m_tag_prepend(m, mt);
}
/*
 * Attach an MTAG_PROMISCINET_L2INFO tag holding a copy of the layer-2
 * info 'l2i' to mbuf 'm'.
 *
 * Returns 0 on success, ENOMEM when the tag cannot be allocated.
 */
int
if_promiscinet_add_tag(struct mbuf *m, struct in_l2info *l2i)
{
	struct ifl2info *tag;

	tag = (struct ifl2info *)m_tag_alloc(MTAG_PROMISCINET,
	    MTAG_PROMISCINET_L2INFO, MTAG_PROMISCINET_L2INFO_LEN, M_NOWAIT);
	if (tag == NULL)
		return (ENOMEM);

	in_promisc_l2info_copy(&tag->ifl2i_info, l2i);
	m_tag_prepend(m, &tag->ifl2i_mtag);
	return (0);
}
/*
 * nbuf_add_tag: add a tag to the specified network buffer.
 *
 * => The tag is always allocated as PACKET_TAG_NPF; note that the
 *    'key' parameter is currently not stored anywhere.
 * => Returns 0 on success or errno on failure.
 */
int
nbuf_add_tag(nbuf_t *nbuf, uint32_t key, uint32_t val)
{
	struct mbuf *m = nbuf->nb_mbuf0;
	struct m_tag *tag;

	KASSERT(m_flags_p(m, M_PKTHDR));

	tag = m_tag_get(PACKET_TAG_NPF, sizeof(uint32_t), M_NOWAIT);
	if (tag == NULL)
		return ENOMEM;

	*(uint32_t *)(tag + 1) = val;
	m_tag_prepend(m, tag);
	return 0;
}
int ieee80211_add_callback(struct mbuf* m, void (*func)(struct ieee80211_node*, void*, int), void* arg) { struct m_tag* mtag; struct ieee80211_cb* cb; mtag = m_tag_alloc(MTAG_ABI_NET80211, NET80211_TAG_CALLBACK, sizeof(struct ieee80211_cb), M_NOWAIT); if (mtag == NULL) return 0; cb = (struct ieee80211_cb*)(mtag+1); cb->func = func; cb->arg = arg; m_tag_prepend(m, mtag); m->m_flags |= M_TXCB; return 1; }
/*
 * Allocate a tag of (id, type) with 'length' bytes of payload on a
 * packet-header mbuf and return a pointer to the payload in *data_p.
 *
 * Returns EINVAL on bad arguments, EEXIST when a tag with the same
 * (id, type) is already attached, and ENOMEM/EWOULDBLOCK (depending
 * on 'how') when allocation fails.
 */
errno_t
mbuf_tag_allocate(mbuf_t mbuf, mbuf_tag_id_t id, mbuf_tag_type_t type,
    size_t length, mbuf_how_t how, void **data_p)
{
	struct m_tag *tag;
	u_int32_t id_first, id_last;

	if (data_p != NULL)
		*data_p = NULL;

	/* Valid ids are those registered in the NSI_MBUF_TAG namespace. */
	(void) net_str_id_first_last(&id_first, &id_last, NSI_MBUF_TAG);

	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < id_first || id > id_last)
		return (EINVAL);
	/* Length must be non-zero and fit in 16 bits. */
	if (length < 1 || (length & 0xffff0000) != 0 || data_p == NULL)
		return (EINVAL);

	/* Refuse to allocate the same (id, type) tag twice. */
	if (m_tag_locate(mbuf, id, type, NULL) != NULL)
		return (EEXIST);

	tag = m_tag_create(id, type, length, how, mbuf);
	if (tag == NULL)
		return ((how == M_WAITOK) ? ENOMEM : EWOULDBLOCK);

	m_tag_prepend(mbuf, tag);
	*data_p = tag + 1;
	return (0);
}
static inline int tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa) { struct dn_pkt_tag *dt; struct m_tag *mtag; mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*dt), M_NOWAIT | M_ZERO); if (mtag == NULL) return 1; /* Cannot allocate packet header. */ m_tag_prepend(m, mtag); /* Attach to mbuf chain. */ dt = (struct dn_pkt_tag *)(mtag + 1); dt->rule = fwa->rule; dt->rule.info &= IPFW_ONEPASS; /* only keep this info */ dt->dn_dir = dir; dt->ifp = fwa->oif; /* dt->output tame is updated as we move through */ dt->output_time = dn_cfg.curr_time; return 0; }
/*
 * Save an incoming source route in a PACKET_TAG_IPOPTIONS tag attached
 * to 'm', to be picked up later by ip_srcroute() if the receiver is
 * interested in building a reply.  Best effort: silently does nothing
 * when the tag cannot be allocated or the option is oversized.
 */
static void
save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
{
	struct ipopt_tag *opts;
	unsigned olen;

	opts = (struct ipopt_tag *)m_tag_get(PACKET_TAG_IPOPTIONS,
	    sizeof(struct ipopt_tag), M_NOWAIT);
	if (opts == NULL)
		return;

	olen = option[IPOPT_OLEN];
	/* Reject options too large to fit alongside the final dst. */
	if (olen > sizeof(opts->ip_srcrt) - (1 + sizeof(dst))) {
		m_tag_free((struct m_tag *)opts);
		return;
	}

	bcopy(option, opts->ip_srcrt.srcopt, olen);
	opts->ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
	opts->ip_srcrt.dst = dst;
	m_tag_prepend(m, (struct m_tag *)opts);
}
int mac_mbuf_init(struct mbuf *m, int flag) { struct m_tag *tag; int error; M_ASSERTPKTHDR(m); if (mac_labeled & MPC_OBJECT_MBUF) { tag = m_tag_get(PACKET_TAG_MACLABEL, sizeof(struct label), flag); if (tag == NULL) return (ENOMEM); error = mac_mbuf_tag_init(tag, flag); if (error) { m_tag_free(tag); return (error); } m_tag_prepend(m, tag); } return (0); }
/*
 * Attach a driver-auxiliary data tag (m_drvaux_tag header followed by
 * 'length' bytes of module-specific data) to a packet-header mbuf and
 * optionally return a pointer to the data area in *data_p.
 *
 * Returns EINVAL on bad arguments, EEXIST when a drvaux tag is already
 * attached, ENOMEM/EWOULDBLOCK (depending on 'how') when allocation
 * fails.
 */
errno_t
mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
    u_int32_t subfamily, size_t length, void **data_p)
{
	struct m_tag *tag;
	struct m_drvaux_tag *hdr;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
	    length == 0 || length > MBUF_DRVAUX_MAXLEN)
		return (EINVAL);

	if (data_p != NULL)
		*data_p = NULL;

	/* Only one drvaux tag may be associated at a time. */
	tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL);
	if (tag != NULL)
		return (EEXIST);

	/* Tag is (m_drvaux_tag + module specific data). */
	tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
	    sizeof (*hdr) + length, how, mbuf);
	if (tag == NULL)
		return ((how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK);

	hdr = (struct m_drvaux_tag *)(tag + 1);
	hdr->da_family = family;
	hdr->da_subfamily = subfamily;
	hdr->da_length = length;

	/* Associate the tag. */
	m_tag_prepend(mbuf, tag);

	if (data_p != NULL)
		*data_p = (hdr + 1);

	return (0);
}
int gre_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct rtentry *rt) { int error = 0; struct gre_softc *sc = (struct gre_softc *) (ifp->if_softc); struct greip *gh = NULL; struct ip *inp = NULL; u_int8_t ip_tos = 0; u_int16_t etype = 0; struct mobile_h mob_h; struct m_tag *mtag; if ((ifp->if_flags & IFF_UP) == 0 || sc->g_src.s_addr == INADDR_ANY || sc->g_dst.s_addr == INADDR_ANY) { m_freem(m); error = ENETDOWN; goto end; } #ifdef DIAGNOSTIC if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.rdomain)) { printf("%s: trying to send packet on wrong domain. " "if %d vs. mbuf %d, AF %d\n", ifp->if_xname, ifp->if_rdomain, rtable_l2(m->m_pkthdr.rdomain), dst->sa_family); } #endif /* Try to limit infinite recursion through misconfiguration. */ for (mtag = m_tag_find(m, PACKET_TAG_GRE, NULL); mtag; mtag = m_tag_find(m, PACKET_TAG_GRE, mtag)) { if (!bcmp((caddr_t)(mtag + 1), &ifp, sizeof(struct ifnet *))) { IF_DROP(&ifp->if_snd); m_freem(m); error = EIO; goto end; } } mtag = m_tag_get(PACKET_TAG_GRE, sizeof(struct ifnet *), M_NOWAIT); if (mtag == NULL) { IF_DROP(&ifp->if_snd); m_freem(m); error = ENOBUFS; goto end; } bcopy(&ifp, (caddr_t)(mtag + 1), sizeof(struct ifnet *)); m_tag_prepend(m, mtag); m->m_flags &= ~(M_BCAST|M_MCAST); #if NBPFILTER > 0 if (ifp->if_bpf) bpf_mtap_af(ifp->if_bpf, dst->sa_family, m, BPF_DIRECTION_OUT); #endif if (sc->g_proto == IPPROTO_MOBILE) { if (ip_mobile_allow == 0) { IF_DROP(&ifp->if_snd); m_freem(m); error = EACCES; goto end; } if (dst->sa_family == AF_INET) { struct mbuf *m0; int msiz; /* * Make sure the complete IP header (with options) * is in the first mbuf. 
*/ if (m->m_len < sizeof(struct ip)) { m = m_pullup(m, sizeof(struct ip)); if (m == NULL) { IF_DROP(&ifp->if_snd); error = ENOBUFS; goto end; } else inp = mtod(m, struct ip *); if (m->m_len < inp->ip_hl << 2) { m = m_pullup(m, inp->ip_hl << 2); if (m == NULL) { IF_DROP(&ifp->if_snd); error = ENOBUFS; goto end; } } } inp = mtod(m, struct ip *); bzero(&mob_h, MOB_H_SIZ_L); mob_h.proto = (inp->ip_p) << 8; mob_h.odst = inp->ip_dst.s_addr; inp->ip_dst.s_addr = sc->g_dst.s_addr; /* * If the packet comes from our host, we only change * the destination address in the IP header. * Otherwise we need to save and change the source. */ if (inp->ip_src.s_addr == sc->g_src.s_addr) { msiz = MOB_H_SIZ_S; } else { mob_h.proto |= MOB_H_SBIT; mob_h.osrc = inp->ip_src.s_addr; inp->ip_src.s_addr = sc->g_src.s_addr; msiz = MOB_H_SIZ_L; } HTONS(mob_h.proto); mob_h.hcrc = gre_in_cksum((u_int16_t *) &mob_h, msiz); /* Squeeze in the mobility header */ if ((m->m_data - msiz) < m->m_pktdat) { /* Need new mbuf */ MGETHDR(m0, M_DONTWAIT, MT_HEADER); if (m0 == NULL) { IF_DROP(&ifp->if_snd); m_freem(m); error = ENOBUFS; goto end; } M_MOVE_HDR(m0, m); m0->m_len = msiz + (inp->ip_hl << 2); m0->m_data += max_linkhdr; m0->m_pkthdr.len = m->m_pkthdr.len + msiz; m->m_data += inp->ip_hl << 2; m->m_len -= inp->ip_hl << 2; bcopy((caddr_t) inp, mtod(m0, caddr_t), sizeof(struct ip)); m0->m_next = m; m = m0; } else { /* we have some space left in the old one */ m->m_data -= msiz; m->m_len += msiz; m->m_pkthdr.len += msiz; bcopy(inp, mtod(m, caddr_t), inp->ip_hl << 2); } /* Copy Mobility header */ inp = mtod(m, struct ip *); bcopy(&mob_h, (caddr_t)(inp + 1), (unsigned) msiz); inp->ip_len = htons(ntohs(inp->ip_len) + msiz); } else { /* AF_INET */
/*
 * ieee1394_output:
 *	Output routine for IEEE 1394 (FireWire) interfaces: resolve the
 *	link-layer destination (cached in an MTAG_FIREWIRE_HWADDR tag for
 *	unicast), run BPF, fragment to the negotiated packet size and hand
 *	the fragments to the driver.  Returns 0 or an errno; on error any
 *	remaining fragment chain is freed.
 */
static int
ieee1394_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	uint16_t etype = 0;
	struct mbuf *m;
	int hdrlen, error = 0;
	struct mbuf *mcopy = NULL;
	struct ieee1394_hwaddr *hwdst, baddr;
	const struct ieee1394_hwaddr *myaddr;
#ifdef INET
	struct arphdr *ah;
#endif /* INET */
	struct m_tag *mtag;
	int unicast;

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
		senderr(ENETDOWN);

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family);

	/*
	 * For unicast, we make a tag to store the lladdr of the
	 * destination. This might not be the first time we have seen
	 * the packet (for instance, the arp code might be trying to
	 * re-send it after receiving an arp reply) so we only
	 * allocate a tag if there isn't one there already. For
	 * multicast, we will eventually use a different tag to store
	 * the channel number.
	 */
	unicast = !(m0->m_flags & (M_BCAST | M_MCAST));
	if (unicast) {
		mtag = m_tag_find(m0, MTAG_FIREWIRE_HWADDR, NULL);
		if (!mtag) {
			mtag = m_tag_get(MTAG_FIREWIRE_HWADDR,
			    sizeof (struct ieee1394_hwaddr), M_NOWAIT);
			if (!mtag) {
				error = ENOMEM;
				goto bad;
			}
			m_tag_prepend(m0, mtag);
		}
		hwdst = (struct ieee1394_hwaddr *)(mtag + 1);
	} else {
		/* NOTE(review): baddr is not initialized before use as
		 * hwdst in the speed/maxrec clamping below — confirm the
		 * broadcast path against NetBSD if_ieee1394subr.c. */
		hwdst = &baddr;
	}

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		/* ARP fills the tag payload; EWOULDBLOCK means the packet
		 * was queued pending resolution, which is not an error. */
		if (unicast &&
		    (error = arpresolve(ifp, rt, m0, dst, hwdst,
		    sizeof(*hwdst))) != 0)
			return error == EWOULDBLOCK ? 0 : error;
		/* if broadcasting on a simplex interface, loopback a copy */
		if ((m0->m_flags & M_BCAST) && (ifp->if_flags & IFF_SIMPLEX))
			mcopy = m_copy(m0, 0, M_COPYALL);
		etype = htons(ETHERTYPE_IP);
		break;
	case AF_ARP:
		ah = mtod(m0, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_IEEE1394);
		etype = htons(ETHERTYPE_ARP);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (unicast &&
		    (!nd6_storelladdr(ifp, rt, m0, dst, hwdst->iha_uid,
		    IEEE1394_ADDR_LEN))) {
			/* something bad happened */
			return 0;
		}
		etype = htons(ETHERTYPE_IPV6);
		break;
#endif /* INET6 */
	case pseudo_AF_HDRCMPLT:
	case AF_UNSPEC:
		/* TODO? */
	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		senderr(EAFNOSUPPORT);
		break;
	}

	if (mcopy)
		looutput(ifp, mcopy, dst, rt);

	myaddr = (const struct ieee1394_hwaddr *)CLLADDR(ifp->if_sadl);

	/* Feed a pseudo link-layer header plus the payload to BPF. */
	if (ifp->if_bpf) {
		struct ieee1394_bpfhdr h;
		if (unicast)
			memcpy(h.ibh_dhost, hwdst->iha_uid, 8);
		else
			memcpy(h.ibh_dhost,
			    ((const struct ieee1394_hwaddr *)
			    ifp->if_broadcastaddr)->iha_uid, 8);
		memcpy(h.ibh_shost, myaddr->iha_uid, 8);
		h.ibh_type = etype;
		bpf_mtap2(ifp->if_bpf, &h, sizeof(h), m0);
	}

	/* Packet addressed to ourselves on a simplex link: loop it back. */
	if ((ifp->if_flags & IFF_SIMPLEX) &&
	    unicast &&
	    memcmp(hwdst, myaddr, IEEE1394_ADDR_LEN) == 0)
		return looutput(ifp, m0, dst, rt);

	/*
	 * XXX:
	 * The maximum possible rate depends on the topology.
	 * So the determination of maxrec and fragmentation should be
	 * called from the driver after probing the topology map.
	 */
	/* NOTE(review): reserving IEEE1394_GASP_LEN on the *unicast* path
	 * looks inverted — GASP headers frame broadcast/stream packets.
	 * Confirm against the upstream source before relying on this. */
	if (unicast) {
		hdrlen = IEEE1394_GASP_LEN;
		hwdst->iha_speed = 0;	/* XXX */
	} else
		hdrlen = 0;

	/* Clamp speed and max-record size to what both ends support. */
	if (hwdst->iha_speed > myaddr->iha_speed)
		hwdst->iha_speed = myaddr->iha_speed;
	if (hwdst->iha_maxrec > myaddr->iha_maxrec)
		hwdst->iha_maxrec = myaddr->iha_maxrec;
	if (hwdst->iha_maxrec > (8 + hwdst->iha_speed))
		hwdst->iha_maxrec = 8 + hwdst->iha_speed;
	if (hwdst->iha_maxrec < 8)
		hwdst->iha_maxrec = 8;

	m0 = ieee1394_fragment(ifp, m0, (2<<hwdst->iha_maxrec) - hdrlen,
	    etype);
	if (m0 == NULL)
		senderr(ENOBUFS);

	/* Hand each fragment to the driver; stop on the first error. */
	while ((m = m0) != NULL) {
		m0 = m->m_nextpkt;
		error = if_transmit_lock(ifp, m);
		if (error) {
			/* mbuf is already freed */
			goto bad;
		}
	}
	return 0;

bad:
	/* Free any fragments not yet handed to the driver. */
	while (m0 != NULL) {
		m = m0->m_nextpkt;
		m_freem(m0);
		m0 = m;
	}
	return error;
}
int ipsec_process_done(struct mbuf *m, struct ipsecrequest *isr) { struct tdb_ident *tdbi; struct m_tag *mtag; struct secasvar *sav; struct secasindex *saidx; int error; IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(isr != NULL, ("null ISR")); sav = isr->sav; IPSEC_ASSERT(sav != NULL, ("null SA")); IPSEC_ASSERT(sav->sah != NULL, ("null SAH")); saidx = &sav->sah->saidx; switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: /* Fix the header length, for AH processing. */ mtod(m, struct ip *)->ip_len = htons(m->m_pkthdr.len); break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Fix the header length, for AH processing. */ if (m->m_pkthdr.len < sizeof (struct ip6_hdr)) { error = ENXIO; goto bad; } if (m->m_pkthdr.len - sizeof (struct ip6_hdr) > IPV6_MAXPACKET) { /* No jumbogram support. */ error = ENXIO; /*?*/ goto bad; } mtod(m, struct ip6_hdr *)->ip6_plen = htons(m->m_pkthdr.len - sizeof(struct ip6_hdr)); break; #endif /* INET6 */ default: DPRINTF(("%s: unknown protocol family %u\n", __func__, saidx->dst.sa.sa_family)); error = ENXIO; goto bad; } /* * Add a record of what we've done or what needs to be done to the * packet. */ mtag = m_tag_get(PACKET_TAG_IPSEC_OUT_DONE, sizeof(struct tdb_ident), M_NOWAIT); if (mtag == NULL) { DPRINTF(("%s: could not get packet tag\n", __func__)); error = ENOMEM; goto bad; } tdbi = (struct tdb_ident *)(mtag + 1); tdbi->dst = saidx->dst; tdbi->proto = saidx->proto; tdbi->spi = sav->spi; m_tag_prepend(m, mtag); /* * If there's another (bundled) SA to apply, do so. * Note that this puts a burden on the kernel stack size. * If this is a problem we'll need to introduce a queue * to set the packet on so we can unwind the stack before * doing further processing. */ if (isr->next) { IPSECSTAT_INC(ips_out_bundlesa); /* XXX-BZ currently only support same AF bundles. 
*/ switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: return ipsec4_process_packet(m, isr->next, 0, 0); /* NOTREACHED */ #endif #ifdef notyet #ifdef INET6 case AF_INET6: /* XXX */ ipsec6_output_trans() ipsec6_output_tunnel() /* NOTREACHED */ #endif /* INET6 */ #endif default: DPRINTF(("%s: unknown protocol family %u\n", __func__, saidx->dst.sa.sa_family)); error = ENXIO; goto bad; } }
static int firewire_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { struct fw_com *fc = IFP2FWC(ifp); int error, type; struct m_tag *mtag; union fw_encap *enc; struct fw_hwaddr *destfw; uint8_t speed; uint16_t psize, fsize, dsize; struct mbuf *mtail; int unicast, dgl, foff; static int next_dgl; #if defined(INET) || defined(INET6) struct llentry *lle; #endif #ifdef MAC error = mac_ifnet_check_transmit(ifp, m); if (error) goto bad; #endif if (!((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))) { error = ENETDOWN; goto bad; } /* * For unicast, we make a tag to store the lladdr of the * destination. This might not be the first time we have seen * the packet (for instance, the arp code might be trying to * re-send it after receiving an arp reply) so we only * allocate a tag if there isn't one there already. For * multicast, we will eventually use a different tag to store * the channel number. */ unicast = !(m->m_flags & (M_BCAST | M_MCAST)); if (unicast) { mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, NULL); if (!mtag) { mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, sizeof (struct fw_hwaddr), M_NOWAIT); if (!mtag) { error = ENOMEM; goto bad; } m_tag_prepend(m, mtag); } destfw = (struct fw_hwaddr *)(mtag + 1); } else { destfw = 0; } switch (dst->sa_family) { #ifdef INET case AF_INET: /* * Only bother with arp for unicast. Allocation of * channels etc. for firewire is quite different and * doesn't fit into the arp model. */ if (unicast) { error = arpresolve(ifp, ro ? ro->ro_rt : NULL, m, dst, (u_char *) destfw, &lle); if (error) return (error == EWOULDBLOCK ? 0 : error); } type = ETHERTYPE_IP; break; case AF_ARP: { struct arphdr *ah; ah = mtod(m, struct arphdr *); ah->ar_hrd = htons(ARPHRD_IEEE1394); type = ETHERTYPE_ARP; if (unicast) *destfw = *(struct fw_hwaddr *) ar_tha(ah); /* * The standard arp code leaves a hole for the target * hardware address which we need to close up. 
*/ bcopy(ar_tpa(ah), ar_tha(ah), ah->ar_pln); m_adj(m, -ah->ar_hln); break; } #endif #ifdef INET6 case AF_INET6: if (unicast) { error = nd6_storelladdr(fc->fc_ifp, m, dst, (u_char *) destfw, &lle); if (error) return (error); } type = ETHERTYPE_IPV6; break; #endif default: if_printf(ifp, "can't handle af%d\n", dst->sa_family); error = EAFNOSUPPORT; goto bad; } /* * Let BPF tap off a copy before we encapsulate. */ if (bpf_peers_present(ifp->if_bpf)) { struct fw_bpfhdr h; if (unicast) bcopy(destfw, h.firewire_dhost, 8); else bcopy(&firewire_broadcastaddr, h.firewire_dhost, 8); bcopy(&fc->fc_hwaddr, h.firewire_shost, 8); h.firewire_type = htons(type); bpf_mtap2(ifp->if_bpf, &h, sizeof(h), m); } /* * Punt on MCAP for now and send all multicast packets on the * broadcast channel. */ if (m->m_flags & M_MCAST) m->m_flags |= M_BCAST; /* * Figure out what speed to use and what the largest supported * packet size is. For unicast, this is the minimum of what we * can speak and what they can hear. For broadcast, lets be * conservative and use S100. We could possibly improve that * by examining the bus manager's speed map or similar. We * also reduce the packet size for broadcast to account for * the GASP header. */ if (unicast) { speed = min(fc->fc_speed, destfw->sspd); psize = min(512 << speed, 2 << destfw->sender_max_rec); } else { speed = 0; psize = 512 - 2*sizeof(uint32_t); } /* * Next, we encapsulate, possibly fragmenting the original * datagram if it won't fit into a single packet. */ if (m->m_pkthdr.len <= psize - sizeof(uint32_t)) { /* * No fragmentation is necessary. */ M_PREPEND(m, sizeof(uint32_t), M_NOWAIT); if (!m) { error = ENOBUFS; goto bad; } enc = mtod(m, union fw_encap *); enc->unfrag.ether_type = type; enc->unfrag.lf = FW_ENCAP_UNFRAG; enc->unfrag.reserved = 0; /* * Byte swap the encapsulation header manually. */ enc->ul[0] = htonl(enc->ul[0]); error = (ifp->if_transmit)(ifp, m); return (error); } else {
/*
 * Build and send an LMI status-inquiry frame on the LMI channel.
 * 'full' selects a full status report rather than a partial one.
 * The frame bytes below follow the Annex-A / Annex-D / Group-of-4
 * variants selected by the GROUP4/ANNEXA/ANNEXD macros; after
 * LMI_PATIENCE unanswered inquiries all UP DLCIs are marked DOWN.
 */
static void
nglmi_inquire(sc_p sc, int full)
{
	struct mbuf *m;
	struct ng_tag_prio *ptag;
	char *cptr, *start;
	int error;

	if (sc->lmi_channel == NULL)
		return;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		log(LOG_ERR, "nglmi: unable to start up LMI processing\n");
		return;
	}
	m->m_pkthdr.rcvif = NULL;

	/* Attach a tag to packet, marking it of link level state priority, so
	 * that device driver would put it in the beginning of queue */
	ptag = (struct ng_tag_prio *)m_tag_alloc(NGM_GENERIC_COOKIE,
	    NG_TAG_PRIO, (sizeof(struct ng_tag_prio) - sizeof(struct m_tag)),
	    M_NOWAIT);
	if (ptag != NULL) {	/* if it failed, well, it was optional anyhow */
		ptag->priority = NG_PRIO_LINKSTATE;
		ptag->discardability = -1;
		m_tag_prepend(m, &ptag->tag);
	}

	m->m_data += 4;		/* leave some room for a header */
	cptr = start = mtod(m, char *);

	/* add in the header for an LMI inquiry. */
	*cptr++ = 0x03;		/* UI frame */
	if (GROUP4(sc))
		*cptr++ = 0x09;	/* proto discriminator */
	else
		*cptr++ = 0x08;	/* proto discriminator */
	*cptr++ = 0x00;		/* call reference */
	*cptr++ = 0x75;		/* inquiry */

	/* If we are Annex-D, add locking shift to codeset 5. */
	if (ANNEXD(sc))
		*cptr++ = 0x95;	/* locking shift */

	/* Add a request type */
	if (ANNEXA(sc))
		*cptr++ = 0x51;	/* report type */
	else
		*cptr++ = 0x01;	/* report type */
	*cptr++ = 0x01;		/* size = 1 */
	if (full)
		*cptr++ = 0x00;	/* full */
	else
		*cptr++ = 0x01;	/* partial */

	/* Add a link verification IE */
	if (ANNEXA(sc))
		*cptr++ = 0x53;	/* verification IE */
	else
		*cptr++ = 0x03;	/* verification IE */
	*cptr++ = 0x02;		/* 2 extra bytes */
	*cptr++ = sc->local_seq;
	*cptr++ = sc->remote_seq;
	sc->seq_retries++;

	/* Send it */
	m->m_len = m->m_pkthdr.len = cptr - start;
	NG_SEND_DATA_ONLY(error, sc->lmi_channel, m);

	/* If we've been sending requests for long enough, and there has
	 * been no response, then mark as DOWN, any DLCIs that are UP. */
	if (sc->seq_retries == LMI_PATIENCE) {
		int count;

		for (count = 0; count < MAXDLCI; count++)
			if (sc->dlci_state[count] == DLCI_UP)
				sc->dlci_state[count] = DLCI_DOWN;
	}
}
/*
 * Copy a packet written to a BPF descriptor from userspace ('uio')
 * into a fresh mbuf, fill in 'sockp' with a sockaddr appropriate for
 * the link type (copying the link header out of the data when the DLT
 * has one), run the write filter, and record the DLT in a
 * PACKET_TAG_DLT mbuf tag.
 *
 * On success *mp holds the mbuf and 0 is returned; on failure the
 * mbuf is freed and an errno is returned (callers must not use *mp).
 *
 * Fix: the m_tag_get() failure path previously returned ENOMEM
 * without freeing the mbuf; it now goes through 'bad' like every
 * other post-allocation error path.
 */
int
bpf_movein(struct uio *uio, u_int linktype, struct mbuf **mp,
    struct sockaddr *sockp, struct bpf_insn *filter)
{
	struct mbuf *m;
	struct m_tag *mtag;
	int error;
	u_int hlen;
	u_int len;
	u_int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;
	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;
	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;
	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;
	case DLT_IEEE802_11:
	case DLT_IEEE802_11_RADIO:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;
	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;
	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;
	default:
		return (EIO);
	}

	len = uio->uio_resid;
	if (len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	m->m_pkthdr.rcvif = 0;
	m->m_pkthdr.len = len - hlen;

	if (len > MHLEN) {
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;

	error = uiomove(mtod(m, caddr_t), len, uio);
	if (error)
		goto bad;

	/* Reject packets the descriptor's write filter does not accept. */
	slen = bpf_filter(filter, mtod(m, u_char *), len, len);
	if (slen < len) {
		error = EPERM;
		goto bad;
	}

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */
	}

	/*
	 * Prepend the data link type as a mbuf tag
	 */
	mtag = m_tag_get(PACKET_TAG_DLT, sizeof(u_int), M_NOWAIT);
	if (mtag == NULL) {
		error = ENOMEM;
		goto bad;	/* was: return (ENOMEM) — leaked m */
	}
	*(u_int *)(mtag + 1) = linktype;
	m_tag_prepend(m, mtag);

	return (0);
 bad:
	m_freem(m);
	return (error);
}
/*
 * pfil(9) input hook: run an inbound packet through the ipfw ruleset.
 *
 * On return, *m0 is either the (possibly modified) packet to continue
 * processing, or NULL when the packet was consumed (diverted, queued to
 * dummynet) or dropped.  Returns 0 to let the packet continue, EACCES
 * when it was dropped.
 */
int
ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	struct ip_fw_args args;
	struct m_tag *dn_tag;
	int ipfw = 0;
	int divert;
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif

	KASSERT(dir == PFIL_IN, ("ipfw_check_in wrong direction!"));

	/* Firewall disabled: accept everything unconditionally. */
	if (!fw_enable)
		goto pass;

	bzero(&args, sizeof(args));

	/*
	 * A packet re-injected by dummynet carries a tag naming the rule
	 * it matched; resume rule processing from there instead of rule 0.
	 */
	dn_tag = m_tag_find(*m0, PACKET_TAG_DUMMYNET, NULL);
	if (dn_tag != NULL){
		struct dn_pkt_tag *dt;

		dt = (struct dn_pkt_tag *)(dn_tag+1);
		args.rule = dt->rule;

		m_tag_delete(*m0, dn_tag);
	}

again:
	args.m = *m0;
	args.inp = inp;
	ipfw = ipfw_chk(&args);
	*m0 = args.m;	/* ipfw_chk may have replaced or freed the mbuf */

	if ((ipfw & IP_FW_PORT_DENY_FLAG) || *m0 == NULL)
		goto drop;

	/* Plain accept with no forward override: pass the packet on. */
	if (ipfw == 0 && args.next_hop == NULL)
		goto pass;

	/* Rule sends the packet to a dummynet pipe/queue. */
	if (DUMMYNET_LOADED && (ipfw & IP_FW_PORT_DYNT_FLAG) != 0) {
		ip_dn_io_ptr(*m0, ipfw & 0xffff, DN_TO_IP_IN, &args);
		*m0 = NULL;
		return 0;	/* packet consumed */
	}

	/*
	 * Divert or tee: hand the packet (or a copy, for tee) to the
	 * divert socket.  A tee keeps the original and loops back to
	 * continue rule processing at "again".
	 */
	if (ipfw != 0 && (ipfw & IP_FW_PORT_DYNT_FLAG) == 0) {
		if ((ipfw & IP_FW_PORT_TEE_FLAG) != 0)
			divert = ipfw_divert(m0, DIV_DIR_IN, 1);
		else
			divert = ipfw_divert(m0, DIV_DIR_IN, 0);

		/* tee should continue again with the firewall. */
		if (divert) {
			*m0 = NULL;
			return 0;	/* packet consumed */
		} else
			goto again;	/* continue with packet */
	}

#ifdef IPFIREWALL_FORWARD
	/*
	 * "fwd" action: tag the packet with the next-hop address so
	 * ip_input()/ip_forward() route it accordingly.
	 */
	if (ipfw == 0 && args.next_hop != NULL) {
		fwd_tag = m_tag_get(PACKET_TAG_IPFORWARD,
				sizeof(struct sockaddr_in), M_NOWAIT);
		if (fwd_tag == NULL)
			goto drop;
		bcopy(args.next_hop, (fwd_tag+1), sizeof(struct sockaddr_in));
		m_tag_prepend(*m0, fwd_tag);
		if (in_localip(args.next_hop->sin_addr))
			(*m0)->m_flags |= M_FASTFWD_OURS;
		goto pass;
	}
#endif

	/* Anything not explicitly handled above falls through to drop. */
drop:
	if (*m0)
		m_freem(*m0);
	*m0 = NULL;
	return (EACCES);
pass:
	return 0;	/* not filtered */
}
/*
 * When incoming data is appended to the socket, we get notified here.
 * This is also called whenever a significant event occurs for the socket.
 * Our original caller may have queued this even some time ago and
 * we cannot trust that he even still exists. The node however is being
 * held with a reference by the queueing code and guarantied to be valid.
 */
static void
ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2)
{
	struct socket *so = arg1;
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct mbuf *m;
	struct ng_mesg *response;
	struct uio auio;
	int s, flags, error;

	s = splnet();	/* block network interrupts while we drain the socket */

	/* so = priv->so; *//* XXX could have derived this like so */
	KASSERT(so == priv->so, ("%s: wrong socket", __func__));

	/* Allow next incoming event to be queued. */
	atomic_store_rel_int(&priv->fn_sent, 0);

	/* Check whether a pending connect operation has completed */
	if (priv->flags & KSF_CONNECTING) {
		if ((error = so->so_error) != 0) {
			so->so_error = 0;
			soclrstate(so, SS_ISCONNECTING);
		}
		if (!(so->so_state & SS_ISCONNECTING)) {
			/* Connect finished (ok or failed): report the errno
			 * back to the node that requested the connect. */
			NG_MKMESSAGE(response, NGM_KSOCKET_COOKIE,
			    NGM_KSOCKET_CONNECT, sizeof(int32_t),
			    M_WAITOK | M_NULLOK);
			if (response != NULL) {
				response->header.flags |= NGF_RESP;
				response->header.token = priv->response_token;
				*(int32_t *)response->data = error;
				/*
				 * send an async "response" message
				 * to the node that set us up
				 * (if it still exists)
				 */
				NG_SEND_MSG_ID(error, node,
				    response, priv->response_addr, 0);
			}
			priv->flags &= ~KSF_CONNECTING;
		}
	}

	/* Check whether a pending accept operation has completed */
	if (priv->flags & KSF_ACCEPTING) {
		error = ng_ksocket_check_accept(priv);
		if (error != EWOULDBLOCK)
			priv->flags &= ~KSF_ACCEPTING;
		if (error == 0)
			ng_ksocket_finish_accept(priv);
	}

	/*
	 * If we don't have a hook, we must handle data events later.  When
	 * the hook gets created and is connected, this upcall function
	 * will be called again.
	 */
	if (priv->hook == NULL) {
		splx(s);
		return;
	}

	/* Read and forward available mbuf's */
	auio.uio_td = NULL;
	auio.uio_resid = 1000000000;	/* effectively "read everything" */
	flags = MSG_DONTWAIT;
	while (1) {
		struct sockaddr *sa = NULL;
		struct mbuf *n;

		/* Try to get next packet from socket */
		if ((error = soreceive(so, (so->so_state & SS_ISCONNECTED) ?
		    NULL : &sa, &auio, &m, NULL, &flags)) != 0)
			break;

		/* See if we got anything */
		if (m == NULL) {
			if (sa != NULL)
				kfree(sa, M_SONAME);
			break;
		}

		/*
		 * Don't trust the various socket layers to get the
		 * packet header and length correct (e.g. kern/15175).
		 *
		 * Also, do not trust that soreceive() will clear m_nextpkt
		 * for us (e.g. kern/84952, kern/82413).
		 */
		m->m_pkthdr.csum_flags = 0;
		for (n = m, m->m_pkthdr.len = 0; n != NULL; n = n->m_next) {
			m->m_pkthdr.len += n->m_len;
			n->m_nextpkt = NULL;
		}

		/* Put peer's socket address (if any) into a tag */
		if (sa != NULL) {
			struct sa_tag	*stag;

			/* Tag holds our node id plus the full sockaddr. */
			stag = (struct sa_tag *)m_tag_alloc(NGM_KSOCKET_COOKIE,
			    NG_KSOCKET_TAG_SOCKADDR, sizeof(ng_ID_t) +
			    sa->sa_len, MB_DONTWAIT);
			if (stag == NULL) {
				/* Out of tags: forward the data untagged
				 * rather than dropping it. */
				kfree(sa, M_SONAME);
				goto sendit;
			}
			bcopy(sa, &stag->sa, sa->sa_len);
			kfree(sa, M_SONAME);
			stag->id = NG_NODE_ID(node);
			m_tag_prepend(m, &stag->tag);
		}

sendit:		/* Forward data with optional peer sockaddr as packet tag */
		NG_SEND_DATA_ONLY(error, priv->hook, m);
	}

	/*
	 * If the peer has closed the connection, forward a 0-length mbuf
	 * to indicate end-of-file.
	 */
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE &&
	    !(priv->flags & KSF_EOFSEEN)) {
		MGETHDR(m, MB_DONTWAIT, MT_DATA);
		if (m != NULL) {
			m->m_len = m->m_pkthdr.len = 0;
			NG_SEND_DATA_ONLY(error, priv->hook, m);
		}
		priv->flags |= KSF_EOFSEEN;	/* only signal EOF once */
	}
	splx(s);
}
/*
 * Ethernet output routine.
 * Encapsulate a packet of type family for the local net.
 * Assumes that ifp is actually pointer to arpcom structure.
 *
 * Resolves the link-level destination for 'dst' (via ARP/ND/MPLS/raw
 * header as appropriate), prepends the ethernet header, handles carp
 * and bridge special cases, and enqueues the frame on the interface
 * send queue.  Returns 0 or an errno; the mbuf is consumed either way.
 */
int
ether_output(struct ifnet *ifp0, struct mbuf *m0, struct sockaddr *dst,
    struct rtentry *rt0)
{
	u_int16_t etype;
	int s, len, error = 0;
	u_char edst[ETHER_ADDR_LEN];
	u_char *esrc;
	struct mbuf *m = m0;
	struct rtentry *rt;
	struct mbuf *mcopy = NULL;
	struct ether_header *eh;
	struct arpcom *ac = (struct arpcom *)ifp0;
	short mflags;
	struct ifnet *ifp = ifp0;

#ifdef DIAGNOSTIC
	if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.ph_rtableid)) {
		printf("%s: trying to send packet on wrong domain. "
		    "if %d vs. mbuf %d, AF %d\n", ifp->if_xname,
		    ifp->if_rdomain, rtable_l2(m->m_pkthdr.ph_rtableid),
		    dst->sa_family);
	}
#endif

#if NTRUNK > 0
	/* restrict transmission on trunk members to bpf only */
	if (ifp->if_type == IFT_IEEE8023ADLAG &&
	    (m_tag_find(m, PACKET_TAG_DLT, NULL) == NULL))
		senderr(EBUSY);
#endif

#if NCARP > 0
	/* carp interfaces transmit through their underlying device */
	if (ifp->if_type == IFT_CARP) {
		ifp = ifp->if_carpdev;
		ac = (struct arpcom *)ifp;

		if ((ifp0->if_flags & (IFF_UP|IFF_RUNNING)) !=
		    (IFF_UP|IFF_RUNNING))
			senderr(ENETDOWN);
	}
#endif /* NCARP > 0 */

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
		senderr(ENETDOWN);

	/*
	 * Validate/refresh the route; for gateway routes chase the
	 * cached rt_gwroute (re-looking it up if it went down).
	 */
	if ((rt = rt0) != NULL) {
		if ((rt->rt_flags & RTF_UP) == 0) {
			if ((rt0 = rt = rtalloc1(dst, RT_REPORT,
			    m->m_pkthdr.ph_rtableid)) != NULL)
				rt->rt_refcnt--;
			else
				senderr(EHOSTUNREACH);
		}
		if (rt->rt_flags & RTF_GATEWAY) {
			if (rt->rt_gwroute == NULL)
				goto lookup;
			if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) {
				rtfree(rt); rt = rt0;
			lookup:
				rt->rt_gwroute = rtalloc1(rt->rt_gateway,
				    RT_REPORT, ifp->if_rdomain);
				if ((rt = rt->rt_gwroute) == NULL)
					senderr(EHOSTUNREACH);
			}
		}
		/* Honour RTF_REJECT unless the reject entry has expired. */
		if (rt->rt_flags & RTF_REJECT)
			if (rt->rt_rmx.rmx_expire == 0 ||
			    time_second < rt->rt_rmx.rmx_expire)
				senderr(rt == rt0 ? EHOSTDOWN : EHOSTUNREACH);
	}

	esrc = ac->ac_enaddr;

	/* Resolve destination MAC and pick the ethertype per family. */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (!arpresolve(ac, rt, m, dst, edst))
			return (0);	/* if not yet resolved */
		/* If broadcasting on a simplex interface, loopback a copy */
		if ((m->m_flags & M_BCAST) && (ifp->if_flags & IFF_SIMPLEX) &&
		    !m->m_pkthdr.pf.routed)
			mcopy = m_copy(m, 0, (int)M_COPYALL);
		etype = htons(ETHERTYPE_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (!nd6_storelladdr(ifp, rt, m, dst, (u_char *)edst))
			return (0); /* it must be impossible, but... */
		etype = htons(ETHERTYPE_IPV6);
		break;
#endif
#ifdef MPLS
	case AF_MPLS:
		if (rt)
			dst = rt_key(rt);
		else
			senderr(EHOSTUNREACH);
		if (!ISSET(ifp->if_xflags, IFXF_MPLS))
			senderr(ENETUNREACH);
		/* MPLS next hop may be a link-level or an IPv4 address. */
		switch (dst->sa_family) {
		case AF_LINK:
			if (((struct sockaddr_dl *)dst)->sdl_alen <
			    sizeof(edst))
				senderr(EHOSTUNREACH);
			memcpy(edst, LLADDR((struct sockaddr_dl *)dst),
			    sizeof(edst));
			break;
		case AF_INET:
			if (!arpresolve(ac, rt, m, dst, edst))
				return (0); /* if not yet resolved */
			break;
		default:
			senderr(EHOSTUNREACH);
		}
		/* XXX handling for simplex devices in case of M/BCAST ?? */
		if (m->m_flags & (M_BCAST | M_MCAST))
			etype = htons(ETHERTYPE_MPLS_MCAST);
		else
			etype = htons(ETHERTYPE_MPLS);
		break;
#endif /* MPLS */
	case pseudo_AF_HDRCMPLT:
		/* Caller supplied the complete header incl. source MAC. */
		eh = (struct ether_header *)dst->sa_data;
		esrc = eh->ether_shost;
		/* FALLTHROUGH */

	case AF_UNSPEC:
		eh = (struct ether_header *)dst->sa_data;
		memcpy(edst, eh->ether_dhost, sizeof(edst));
		/* AF_UNSPEC doesn't swap the byte order of the ether_type. */
		etype = eh->ether_type;
		break;

	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		senderr(EAFNOSUPPORT);
	}

	/* XXX Should we feed-back an unencrypted IPsec packet ? */
	if (mcopy)
		(void) looutput(ifp, mcopy, dst, rt);

#if NCARP > 0
	if (ifp0 != ifp && ifp0->if_type == IFT_CARP)
		esrc = carp_get_srclladdr(ifp0, esrc);
#endif

	if (ether_addheader(&m, ifp, etype, esrc, edst) == -1)
		senderr(ENOBUFS);

#if NBRIDGE > 0
	/*
	 * Interfaces that are bridgeports need special handling for output.
	 */
	if (ifp->if_bridgeport) {
		struct m_tag *mtag;

		/*
		 * Check if this packet has already been sent out through
		 * this bridgeport, in which case we simply send it out
		 * without further bridge processing.
		 */
		for (mtag = m_tag_find(m, PACKET_TAG_BRIDGE, NULL); mtag;
		    mtag = m_tag_find(m, PACKET_TAG_BRIDGE, mtag)) {
#ifdef DEBUG
			/* Check that the information is there */
			if (mtag->m_tag_len != sizeof(caddr_t)) {
				error = EINVAL;
				goto bad;
			}
#endif
			if (!memcmp(&ifp->if_bridgeport, mtag + 1,
			    sizeof(caddr_t)))
				break;
		}
		if (mtag == NULL) {
			/* Attach a tag so we can detect loops */
			mtag = m_tag_get(PACKET_TAG_BRIDGE, sizeof(caddr_t),
			    M_NOWAIT);
			if (mtag == NULL) {
				error = ENOBUFS;
				goto bad;
			}
			memcpy(mtag + 1, &ifp->if_bridgeport, sizeof(caddr_t));
			m_tag_prepend(m, mtag);
			error = bridge_output(ifp, m, NULL, NULL);
			return (error);
		}
	}
#endif

	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
	if (error) {
		/* mbuf is already freed */
		splx(s);
		return (error);
	}
	/* Stats are counted against the real device; carp's parent too. */
	ifp->if_obytes += len;
#if NCARP > 0
	if (ifp != ifp0)
		ifp0->if_obytes += len;
#endif /* NCARP > 0 */
	if (mflags & M_MCAST)
		ifp->if_omcasts++;
	if_start(ifp);
	splx(s);
	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}
/*
 * Finish outbound IPsec processing for one SA: patch the IP/IPv6 length
 * field for AH, tag the packet so the SPD lookup on re-entry knows it has
 * already been transformed, then either apply the next bundled SA or hand
 * the packet to ip_output()/ip6_output().  Consumes 'm' on all paths.
 */
int
ipsec_process_done(struct mbuf *m, struct ipsecrequest *isr)
{
	struct tdb_ident *tdbi;
	struct m_tag *mtag;
	struct secasvar *sav;
	struct secasindex *saidx;
	int error;

	KASSERT(m != NULL, ("ipsec_process_done: null mbuf"));
	KASSERT(isr != NULL, ("ipsec_process_done: null ISR"));
	sav = isr->sav;
	KASSERT(sav != NULL, ("ipsec_process_done: null SA"));
	KASSERT(sav->sah != NULL, ("ipsec_process_done: null SAH"));

	saidx = &sav->sah->saidx;
	switch (saidx->dst.sa.sa_family) {
#ifdef INET
	case AF_INET:
		/* Fix the header length, for AH processing. */
		mtod(m, struct ip *)->ip_len = htons(m->m_pkthdr.len);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* Fix the header length, for AH processing. */
		if (m->m_pkthdr.len < sizeof (struct ip6_hdr)) {
			error = ENXIO;
			goto bad;
		}
		if (m->m_pkthdr.len - sizeof (struct ip6_hdr) > IPV6_MAXPACKET) {
			/* No jumbogram support. */
			error = ENXIO;	/*?*/
			goto bad;
		}
		mtod(m, struct ip6_hdr *)->ip6_plen =
			htons(m->m_pkthdr.len - sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	default:
		DPRINTF(("ipsec_process_done: unknown protocol family %u\n",
		    saidx->dst.sa.sa_family));
		error = ENXIO;
		goto bad;
	}

	/*
	 * Add a record of what we've done or what needs to be done to the
	 * packet.
	 */
	mtag = m_tag_get(PACKET_TAG_IPSEC_OUT_DONE,
			sizeof(struct tdb_ident), MB_DONTWAIT);
	if (mtag == NULL) {
		DPRINTF(("ipsec_process_done: could not get packet tag\n"));
		error = ENOMEM;
		goto bad;
	}

	/* The tag identifies the SA (dst/proto/spi) that was applied. */
	tdbi = (struct tdb_ident *)m_tag_data(mtag);
	tdbi->dst = saidx->dst;
	tdbi->proto = saidx->proto;
	tdbi->spi = sav->spi;
	m_tag_prepend(m, mtag);

	/*
	 * If there's another (bundled) SA to apply, do so.
	 * Note that this puts a burden on the kernel stack size.
	 * If this is a problem we'll need to introduce a queue
	 * to set the packet on so we can unwind the stack before
	 * doing further processing.
	 */
	if (isr->next) {
		newipsecstat.ips_out_bundlesa++;
		return ipsec4_process_packet(m, isr->next, 0, 0);
	}

	/*
	 * We're done with IPsec processing, transmit the packet using the
	 * appropriate network protocol (IP or IPv6). SPD lookup will be
	 * performed again there.
	 */
	switch (saidx->dst.sa.sa_family) {
#ifdef INET
	struct ip *ip;
	case AF_INET:
		/* ip_output() expects host byte order in these fields. */
		ip = mtod(m, struct ip *);
		ip->ip_len = ntohs(ip->ip_len);
		ip->ip_off = ntohs(ip->ip_off);

		return ip_output(m, NULL, NULL, IP_RAWOUTPUT, NULL, NULL);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/*
		 * We don't need massage, IPv6 header fields are always in
		 * net endian.
		 */
		return ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
#endif /* INET6 */
	}
	panic("ipsec_process_done");
bad:
	m_freem(m);
	KEY_FREESAV(&sav);
	return (error);
}
/*
 * Potentially decap ESP in UDP frame.  Check for an ESP header
 * and optional marker; if present, strip the UDP header and
 * push the result through IPSec.
 *
 * Returns mbuf to be processed (potentially re-allocated) or
 * NULL if consumed and/or processed.
 */
static struct mbuf *
udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
{
	size_t minlen, payload, skip, iphlen;
	caddr_t data;
	struct udpcb *up;
	struct m_tag *tag;
	struct udphdr *udphdr;
	struct ip *ip;

	INP_RLOCK_ASSERT(inp);

	/*
	 * Pull up data so the longest case is contiguous:
	 *     IP/UDP hdr + non ESP marker + ESP hdr.
	 */
	minlen = off + sizeof(uint64_t) + sizeof(struct esp);
	if (minlen > m->m_pkthdr.len)
		minlen = m->m_pkthdr.len;
	if ((m = m_pullup(m, minlen)) == NULL) {
		IPSECSTAT_INC(ips_in_inval);
		return (NULL);		/* Bypass caller processing. */
	}
	data = mtod(m, caddr_t);	/* Points to ip header. */
	payload = m->m_len - off;	/* Size of payload. */

	/* Single 0xff byte after the UDP header is a NAT-T keepalive. */
	if (payload == 1 && data[off] == '\xff')
		return (m);		/* NB: keepalive packet, no decap. */

	up = intoudpcb(inp);
	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
	KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
	    ("u_flags 0x%x", up->u_flags));

	/*
	 * Check that the payload is large enough to hold an
	 * ESP header and compute the amount of data to remove.
	 *
	 * NB: the caller has already done a pullup for us.
	 * XXX can we assume alignment and eliminate bcopys?
	 */
	if (up->u_flags & UF_ESPINUDP_NON_IKE) {
		/*
		 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
		 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
		 * possible AH mode non-IKE marker+non-ESP marker
		 * from draft-ietf-ipsec-udp-encaps-00.txt.
		 */
		uint64_t marker;

		if (payload <= sizeof(uint64_t) + sizeof(struct esp))
			return (m);	/* NB: no decap. */
		bcopy(data + off, &marker, sizeof(uint64_t));
		if (marker != 0)	/* Non-IKE marker. */
			return (m);	/* NB: no decap. */
		/* Strip the 8-byte non-IKE marker along with the UDP hdr. */
		skip = sizeof(uint64_t) + sizeof(struct udphdr);
	} else {
		uint32_t spi;

		if (payload <= sizeof(struct esp)) {
			IPSECSTAT_INC(ips_in_inval);
			m_freem(m);
			return (NULL);	/* Discard. */
		}
		bcopy(data + off, &spi, sizeof(uint32_t));
		if (spi == 0)		/* Non-ESP marker. */
			return (m);	/* NB: no decap. */
		skip = sizeof(struct udphdr);
	}

	/*
	 * Setup a PACKET_TAG_IPSEC_NAT_T_PORT tag to remember
	 * the UDP ports. This is required if we want to select
	 * the right SPD for multiple hosts behind same NAT.
	 *
	 * NB: ports are maintained in network byte order everywhere
	 *     in the NAT-T code.
	 */
	tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
		2 * sizeof(uint16_t), M_NOWAIT);
	if (tag == NULL) {
		IPSECSTAT_INC(ips_in_nomem);
		m_freem(m);
		return (NULL);		/* Discard. */
	}
	iphlen = off - sizeof(struct udphdr);
	udphdr = (struct udphdr *)(data + iphlen);
	((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
	((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
	m_tag_prepend(m, tag);

	/*
	 * Remove the UDP header (and possibly the non ESP marker)
	 * IP header length is iphlen
	 * Before:
	 *   <--- off --->
	 *   +----+------+-----+
	 *   | IP |  UDP | ESP |
	 *   +----+------+-----+
	 *        <-skip->
	 * After:
	 *          +----+-----+
	 *          | IP | ESP |
	 *          +----+-----+
	 *   <-skip->
	 */
	ovbcopy(data, data + skip, iphlen);
	m_adj(m, skip);

	/* Rewrite the IP header to account for the stripped bytes. */
	ip = mtod(m, struct ip *);
	ip->ip_len = htons(ntohs(ip->ip_len) - skip);
	ip->ip_p = IPPROTO_ESP;

	/*
	 * We cannot yet update the cksums so clear any
	 * h/w cksum flags as they are no longer valid.
	 */
	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
		m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);

	(void) ipsec4_common_input(m, iphlen, ip->ip_p);
	return (NULL);			/* NB: consumed, bypass processing. */
}
/*
 * Finish outbound IPsec processing for one SA: patch the IP/IPv6 length
 * field for AH, tag the packet as already-transformed, recurse for any
 * bundled SA, optionally insert a NAT-T UDP encapsulation header, and
 * hand the packet to ip_output()/ip6_output().  Consumes 'm' on all paths.
 */
int
ipsec_process_done(struct mbuf *m, struct ipsecrequest *isr)
{
	struct tdb_ident *tdbi;
	struct m_tag *mtag;
	struct secasvar *sav;
	struct secasindex *saidx;
	int error;

	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(isr != NULL, ("null ISR"));
	sav = isr->sav;
	IPSEC_ASSERT(sav != NULL, ("null SA"));
	IPSEC_ASSERT(sav->sah != NULL, ("null SAH"));

	saidx = &sav->sah->saidx;
	switch (saidx->dst.sa.sa_family) {
#ifdef INET
	case AF_INET:
		/* Fix the header length, for AH processing. */
		mtod(m, struct ip *)->ip_len = htons(m->m_pkthdr.len);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* Fix the header length, for AH processing. */
		if (m->m_pkthdr.len < sizeof (struct ip6_hdr)) {
			error = ENXIO;
			goto bad;
		}
		if (m->m_pkthdr.len - sizeof (struct ip6_hdr) > IPV6_MAXPACKET) {
			/* No jumbogram support. */
			error = ENXIO;	/*?*/
			goto bad;
		}
		mtod(m, struct ip6_hdr *)->ip6_plen =
			htons(m->m_pkthdr.len - sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	default:
		DPRINTF(("%s: unknown protocol family %u\n", __func__,
		    saidx->dst.sa.sa_family));
		error = ENXIO;
		goto bad;
	}

	/*
	 * Add a record of what we've done or what needs to be done to the
	 * packet.
	 */
	mtag = m_tag_get(PACKET_TAG_IPSEC_OUT_DONE,
	    sizeof(struct tdb_ident), M_NOWAIT);
	if (mtag == NULL) {
		DPRINTF(("%s: could not get packet tag\n", __func__));
		error = ENOMEM;
		goto bad;
	}

	/* The tag identifies the SA (dst/proto/spi) that was applied. */
	tdbi = (struct tdb_ident *)(mtag + 1);
	tdbi->dst = saidx->dst;
	tdbi->proto = saidx->proto;
	tdbi->spi = sav->spi;
	m_tag_prepend(m, mtag);

	/*
	 * If there's another (bundled) SA to apply, do so.
	 * Note that this puts a burden on the kernel stack size.
	 * If this is a problem we'll need to introduce a queue
	 * to set the packet on so we can unwind the stack before
	 * doing further processing.
	 */
	if (isr->next) {
		/* XXX-BZ currently only support same AF bundles. */
		switch (saidx->dst.sa.sa_family) {
#ifdef INET
		case AF_INET:
			IPSECSTAT_INC(ips_out_bundlesa);
			return ipsec4_process_packet(m, isr->next);
			/* NOTREACHED */
#endif
#ifdef notyet
#ifdef INET6
		case AF_INET6:
			/* XXX */
			IPSEC6STAT_INC(ips_out_bundlesa);
			return ipsec6_process_packet(m, isr->next);
			/* NOTREACHED */
#endif /* INET6 */
#endif
		default:
			DPRINTF(("%s: unknown protocol family %u\n", __func__,
			    saidx->dst.sa.sa_family));
			error = ENXIO;
			goto bad;
		}
	}
	key_sa_recordxfer(sav, m);		/* record data transfer */

	/*
	 * We're done with IPsec processing, transmit the packet using the
	 * appropriate network protocol (IP or IPv6). SPD lookup will be
	 * performed again there.
	 */
	switch (saidx->dst.sa.sa_family) {
#ifdef INET
	case AF_INET:
#ifdef IPSEC_NAT_T
		/*
		 * If NAT-T is enabled, now that all IPsec processing is done
		 * insert UDP encapsulation header after IP header.
		 */
		if (sav->natt_type) {
			struct ip *ip = mtod(m, struct ip *);
			const int hlen = (ip->ip_hl << 2);
			int size, off;
			struct mbuf *mi;
			struct udphdr *udp;

			size = sizeof(struct udphdr);
			if (sav->natt_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
				/*
				 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
				 * draft-ietf-ipsec-udp-encaps-(00/)01.txt,
				 * ignoring possible AH mode
				 * non-IKE marker + non-ESP marker
				 * from draft-ietf-ipsec-udp-encaps-00.txt.
				 */
				size += sizeof(u_int64_t);
			}
			/* Open a 'size'-byte gap right after the IP header. */
			mi = m_makespace(m, hlen, size, &off);
			if (mi == NULL) {
				DPRINTF(("%s: m_makespace for udphdr failed\n",
				    __func__));
				error = ENOBUFS;
				goto bad;
			}

			udp = (struct udphdr *)(mtod(mi, caddr_t) + off);
			if (sav->natt_type == UDP_ENCAP_ESPINUDP_NON_IKE)
				udp->uh_sport = htons(UDP_ENCAP_ESPINUDP_PORT);
			else
				udp->uh_sport =
					KEY_PORTFROMSADDR(&sav->sah->saidx.src);
			udp->uh_dport = KEY_PORTFROMSADDR(&sav->sah->saidx.dst);
			udp->uh_sum = 0;	/* UDP checksum optional over IPv4 */
			udp->uh_ulen = htons(m->m_pkthdr.len - hlen);
			ip->ip_len = htons(m->m_pkthdr.len);
			ip->ip_p = IPPROTO_UDP;

			/* Zero 8-byte non-IKE marker follows the UDP header. */
			if (sav->natt_type == UDP_ENCAP_ESPINUDP_NON_IKE)
				*(u_int64_t *)(udp + 1) = 0;
		}
#endif /* IPSEC_NAT_T */

		return ip_output(m, NULL, NULL, IP_RAWOUTPUT, NULL, NULL);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/*
		 * We don't need massage, IPv6 header fields are always in
		 * net endian.
		 */
		return ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
#endif /* INET6 */
	}
	panic("ipsec_process_done");
bad:
	m_freem(m);
	return (error);
}
/*
 * When incoming data is appended to the socket, we get notified here.
 * This is also called whenever a significant event occurs for the socket.
 * Our original caller may have queued this even some time ago and
 * we cannot trust that he even still exists. The node however is being
 * held with a reference by the queueing code and guarantied to be valid.
 */
static void
ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2)
{
	struct socket *so = arg1;
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_mesg *response;
	struct uio auio;
	int flags, error;

	KASSERT(so == priv->so, ("%s: wrong socket", __func__));

	/* Allow next incoming event to be queued. */
	atomic_store_rel_int(&priv->fn_sent, 0);

	/* Check whether a pending connect operation has completed */
	if (priv->flags & KSF_CONNECTING) {
		if ((error = so->so_error) != 0) {
			so->so_error = 0;
			so->so_state &= ~SS_ISCONNECTING;
		}
		if (!(so->so_state & SS_ISCONNECTING)) {
			/* Connect finished (ok or failed): report the errno
			 * back to the node that requested the connect. */
			NG_MKMESSAGE(response, NGM_KSOCKET_COOKIE,
			    NGM_KSOCKET_CONNECT, sizeof(int32_t), M_NOWAIT);
			if (response != NULL) {
				response->header.flags |= NGF_RESP;
				response->header.token = priv->response_token;
				*(int32_t *)response->data = error;
				/*
				 * send an async "response" message
				 * to the node that set us up
				 * (if it still exists)
				 */
				NG_SEND_MSG_ID(error, node,
				    response, priv->response_addr, 0);
			}
			priv->flags &= ~KSF_CONNECTING;
		}
	}

	/* Check whether a pending accept operation has completed */
	if (priv->flags & KSF_ACCEPTING) {
		error = ng_ksocket_check_accept(priv);
		if (error != EWOULDBLOCK)
			priv->flags &= ~KSF_ACCEPTING;
		if (error == 0)
			ng_ksocket_finish_accept(priv);
	}

	/*
	 * If we don't have a hook, we must handle data events later.  When
	 * the hook gets created and is connected, this upcall function
	 * will be called again.
	 */
	if (priv->hook == NULL)
		return;

	/* Read and forward available mbuf's */
	auio.uio_td = NULL;
	auio.uio_resid = MJUMPAGESIZE;	/* XXXGL: sane limit? */
	flags = MSG_DONTWAIT;
	while (1) {
		struct sockaddr *sa = NULL;
		struct mbuf *m;

		/* Try to get next packet from socket */
		if ((error = soreceive(so, (so->so_state & SS_ISCONNECTED) ?
		    NULL : &sa, &auio, &m, NULL, &flags)) != 0)
			break;

		/* See if we got anything */
		if (m == NULL) {
			if (sa != NULL)
				free(sa, M_SONAME);
			break;
		}

		KASSERT(m->m_nextpkt == NULL, ("%s: nextpkt", __func__));
		/*
		 * Stream sockets do not have packet boundaries, so
		 * we have to allocate a header mbuf and attach the
		 * stream of data to it.
		 */
		if (so->so_type == SOCK_STREAM) {
			struct mbuf *mh;

			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL) {
				m_freem(m);
				if (sa != NULL)
					free(sa, M_SONAME);
				break;
			}

			/* Chain the data under the new header and total
			 * up the packet length. */
			mh->m_next = m;
			for (; m; m = m->m_next)
				mh->m_pkthdr.len += m->m_len;
			m = mh;
		}

		/* Put peer's socket address (if any) into a tag */
		if (sa != NULL) {
			struct sa_tag	*stag;

			/* Tag holds our node id plus the full sockaddr. */
			stag = (struct sa_tag *)m_tag_alloc(NGM_KSOCKET_COOKIE,
			    NG_KSOCKET_TAG_SOCKADDR, sizeof(ng_ID_t) +
			    sa->sa_len, M_NOWAIT);
			if (stag == NULL) {
				/* Out of tags: forward the data untagged
				 * rather than dropping it. */
				free(sa, M_SONAME);
				goto sendit;
			}
			bcopy(sa, &stag->sa, sa->sa_len);
			free(sa, M_SONAME);
			stag->id = NG_NODE_ID(node);
			m_tag_prepend(m, &stag->tag);
		}

sendit:		/* Forward data with optional peer sockaddr as packet tag */
		NG_SEND_DATA_ONLY(error, priv->hook, m);
	}

	/*
	 * If the peer has closed the connection, forward a 0-length mbuf
	 * to indicate end-of-file.
	 */
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE &&
	    !(priv->flags & KSF_EOFSEEN)) {
		struct mbuf *m;

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m != NULL)
			NG_SEND_DATA_ONLY(error, priv->hook, m);
		priv->flags |= KSF_EOFSEEN;	/* only signal EOF once */
	}
}