/*
 * tkip_encap - add TKIP privacy headers and do any s/w work required.
 *
 * Prepends the 8-byte TKIP IV/ExtIV (tkip.ic_header bytes) in front of
 * the frame body, sliding the 802.11 header down to make room.
 * Returns 0 to tell the caller to drop the frame (countermeasures
 * active, or mbuf allocation failure).
 *
 * NOTE: fragment is truncated in this view; the success return and the
 * remainder of the disabled #if 0 region are not visible here.
 */
static int
tkip_encap(struct ieee80211_key *k, struct mbuf *m, uint8_t keyid)
{
	struct tkip_ctx *ctx = k->wk_private;
	struct ieee80211vap *vap = ctx->tc_vap;
	struct ieee80211com *ic = vap->iv_ic;
	uint8_t *ivp;
	int hdrlen;

	/*
	 * Handle TKIP counter measures requirement: once countermeasures
	 * are in force all TKIP traffic must be dropped.
	 */
	if (vap->iv_flags & IEEE80211_F_COUNTERM) {
#ifdef IEEE80211_DEBUG
		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
#endif
		/* 'wh' is only referenced by this macro, which is a no-op
		 * when IEEE80211_DEBUG is not defined. */
		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
		    "discard frame due to countermeasures (%s)", __func__);
		vap->iv_stats.is_crypto_tkipcm++;
		return 0;
	}
	hdrlen = ieee80211_hdrspace(ic, mtod(m, void *));

	/*
	 * Copy down 802.11 header and add the IV, KeyID, and ExtIV.
	 * M_PREPEND frees the chain and yields NULL on failure.
	 */
	M_PREPEND(m, tkip.ic_header, M_NOWAIT);
	if (m == NULL)
		return 0;
	ivp = mtod(m, uint8_t *);
	memmove(ivp, ivp + tkip.ic_header, hdrlen);
	ivp += hdrlen;

	/* TKIP IV byte layout per 802.11i: TSC1, WEP seed, TSC0, then
	 * KeyID/ExtIV flag and the 32-bit extended IV (TSC2..TSC5). */
	ivp[0] = k->wk_keytsc >> 8;		/* TSC1 */
	ivp[1] = (ivp[0] | 0x20) & 0x7f;	/* WEP seed */
	ivp[2] = k->wk_keytsc >> 0;		/* TSC0 */
	ivp[3] = keyid | IEEE80211_WEP_EXTIV;	/* KeyID | ExtID */
	ivp[4] = k->wk_keytsc >> 16;		/* TSC2 */
	ivp[5] = k->wk_keytsc >> 24;		/* TSC3 */
	ivp[6] = k->wk_keytsc >> 32;		/* TSC4 */
	ivp[7] = k->wk_keytsc >> 40;		/* TSC5 */

#if 0
	/* Disabled experiment: reserve extra space (MIC + 12 bytes) and
	 * stash the tx MIC key for 3-address frames.  Dead code. */
	if (hdrlen == 24) {
		M_PREPEND(m, tkip.ic_miclen + 12, M_NOWAIT);
		if (m == NULL)
			return 0;
		ivp = mtod(m, uint8_t *);
		memmove(ivp, ivp + tkip.ic_miclen + 12,
		    hdrlen + tkip.ic_header);
		ivp += hdrlen + tkip.ic_header;
		bzero(ivp, 20);
		memcpy((ivp+12), k->wk_txmic, tkip.ic_header);
	}
/*
 * rip_output - generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 *
 * When the socket is not in IP_HDRINCL mode an IP header is built here
 * from the PCB; otherwise the caller-supplied header is used verbatim.
 *
 * NOTE: fragment is truncated in this view (the IP_HDRINCL branch and
 * the final ip_output() call are not visible).
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	struct ip *ip;
	struct inpcb *inp = sotoinpcb(so);
	/* SO_DONTROUTE maps to IP_ROUTETOIF; raw sockets may broadcast. */
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.
	 * Otherwise, allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		/* NOTE(review): no NULL check follows — this relies on
		 * M_PREPEND with M_WAIT never failing in this tree;
		 * confirm that assumption holds here. */
		M_PREPEND(m, sizeof(struct ip), M_WAIT);
		ip = mtod(m, struct ip *);
		ip->ip_tos = 0;
		ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;	/* protocol from socreate() */
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = MAXTTL;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
/*
 * netdump_ether_output - encapsulate and transmit one netdump frame.
 *
 * Prepends an Ethernet header (source taken from ifp, destination and
 * ethertype from the arguments) and hands the frame to the interface's
 * netdump transmit method.  The mbuf is consumed on every path, either
 * here or by the NIC driver.
 *
 * Returns: 0 on success, otherwise an errno value (see errno.h).
 */
static int
netdump_ether_output(struct mbuf *m, struct ifnet *ifp, struct ether_addr dst,
    u_short etype)
{
	struct ether_header *hdr;
	int usable;

	/* Refuse to transmit on a monitor-mode or non-running interface. */
	usable = ((ifp->if_flags & (IFF_MONITOR | IFF_UP)) == IFF_UP) &&
	    ((ifp->if_drv_flags & IFF_DRV_RUNNING) == IFF_DRV_RUNNING);
	if (!usable) {
		if_printf(ifp, "netdump_ether_output: interface isn't up\n");
		m_freem(m);
		return (ENETDOWN);
	}

	/* Make room for, then fill in, the Ethernet header. */
	M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
	if (m == NULL) {
		printf("%s: out of mbufs\n", __func__);
		return (ENOBUFS);
	}
	hdr = mtod(m, struct ether_header *);
	hdr->ether_type = htons(etype);
	memcpy(hdr->ether_dhost, dst.octet, ETHER_ADDR_LEN);
	memcpy(hdr->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);

	return ((ifp->if_netdump_methods->nd_transmit)(ifp, m));
}
/*
 * sscfu_send_lower - hand an SSCOP signal (and optional data) to the
 * netgraph node connected below us.
 *
 * The signal/argument pair travels in a struct sscop_arg prepended to
 * the data; when no data mbuf is supplied a fresh header mbuf is
 * allocated to carry just the argument structure.  On any allocation
 * failure the signal is silently dropped (M_PREPEND frees the chain).
 */
static void
sscfu_send_lower(struct sscfu *sscf, void *p, enum sscop_aasig sig,
    struct mbuf *m, u_int arg)
{
	node_p node = (node_p)p;
	struct priv *priv = NG_NODE_PRIVATE(node);
	struct sscop_arg *sa;
	int error;

	/* No lower hook connected: drop any data on the floor. */
	if (priv->lower == NULL) {
		if (m != NULL)
			m_freem(m);
		return;
	}

	if (m == NULL) {
		/* Signal only: allocate an mbuf for the argument. */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL)
			return;
		m->m_pkthdr.len = m->m_len = sizeof(struct sscop_arg);
	} else {
		/* Signal plus data: prepend the argument structure. */
		M_PREPEND(m, sizeof(struct sscop_arg), M_NOWAIT);
		if (m == NULL)
			return;
	}

	sa = mtod(m, struct sscop_arg *);
	sa->sig = sig;
	sa->arg = arg;

	NG_SEND_DATA_ONLY(error, priv->lower, m);
}
/* Async. stream output */ static void fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp) { struct mbuf *m; struct fw_xfer *xfer; struct fw_xferq *xferq; struct fw_pkt *fp; int i = 0; xfer = NULL; xferq = fwe->fd.fc->atq; while ((xferq->queued < xferq->maxq - 1) && (ifp->if_snd.ifq_head != NULL)) { FWE_LOCK(fwe); xfer = STAILQ_FIRST(&fwe->xferlist); if (xfer == NULL) { #if 0 printf("if_fwe: lack of xfer\n"); #endif FWE_UNLOCK(fwe); break; } STAILQ_REMOVE_HEAD(&fwe->xferlist, link); FWE_UNLOCK(fwe); IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { FWE_LOCK(fwe); STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link); FWE_UNLOCK(fwe); break; } BPF_MTAP(ifp, m); /* keep ip packet alignment for alpha */ M_PREPEND(m, ETHER_ALIGN, M_NOWAIT); fp = &xfer->send.hdr; *(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr; fp->mode.stream.len = m->m_pkthdr.len; xfer->mbuf = m; xfer->send.pay_len = m->m_pkthdr.len; if (fw_asyreq(fwe->fd.fc, -1, xfer) != 0) { /* error */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* XXX set error code */ fwe_output_callback(xfer); } else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); i++; } } #if 0 if (i > 1) printf("%d queued\n", i); #endif if (i > 0) xferq->start(fwe->fd.fc); }
/* Async. stream output */ static void fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp) { struct mbuf *m; struct fw_xfer *xfer; struct fw_xferq *xferq; struct fw_pkt *fp; int i = 0; xfer = NULL; xferq = fwe->fd.fc->atq; while (xferq->queued < xferq->maxq) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; xfer = fw_xfer_alloc(); if (xfer == NULL) { return; } #if __FreeBSD_version >= 500000 BPF_MTAP(ifp, m); #else if (ifp->if_bpf != NULL) bpf_mtap(ifp, m); #endif xfer->send.off = 0; xfer->spd = 2; xfer->fc = fwe->fd.fc; xfer->retry_req = fw_asybusy; xfer->sc = (caddr_t)fwe; xfer->act.hand = fwe_output_callback; /* keep ip packet alignment for alpha */ M_PREPEND(m, ALIGN_PAD, M_DONTWAIT); fp = (struct fw_pkt *)&xfer->dst; /* XXX */ xfer->dst = *((int32_t *)&fwe->pkt_hdr); fp->mode.stream.len = htons(m->m_pkthdr.len); xfer->send.buf = (caddr_t) fp; xfer->mbuf = m; xfer->send.len = m->m_pkthdr.len + HDR_LEN; i++; if (fw_asyreq(xfer->fc, -1, xfer) != 0) { /* error */ ifp->if_oerrors ++; /* XXX set error code */ fwe_output_callback(xfer); } else { ifp->if_opackets ++; } } #if 0 if (i > 1) printf("%d queued\n", i); #endif if (xfer != NULL) xferq->start(xfer->fc); }
/*
 * mpeoutput - output routine for an MPLS PE (mpe) interface.
 *
 * Builds the MPLS shim from the route's MPLS data, maps (or defaults)
 * the TTL, and prepends room for the shim plus a small address record
 * (family + destination IPv4 address) consumed further down the stack.
 *
 * NOTE: fragment is truncated in this view — the 'out' label, the
 * point where 'shim' itself is copied into the mbuf, and the uses of
 * 's' are below the visible region.
 */
int
mpeoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct shim_hdr	shim;
	int		s;
	int		error;
	int		off;
	u_int8_t	op = 0;

#ifdef DIAGNOSTIC
	if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.rdomain)) {
		printf("%s: trying to send packet on wrong domain. "
		    "if %d vs. mbuf %d\n", ifp->if_xname,
		    ifp->if_rdomain, rtable_l2(m->m_pkthdr.rdomain));
	}
#endif
	m->m_pkthdr.rcvif = ifp;
	/* XXX assumes MPLS is always in rdomain 0 */
	m->m_pkthdr.rdomain = 0;

	error = 0;
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		/* Pull label and operation from the route's MPLS data.
		 * 'op' stays 0 (not PUSH) when the route has none. */
		if (rt && rt->rt_flags & RTF_MPLS) {
			shim.shim_label =
			    ((struct rt_mpls *)rt->rt_llinfo)->mpls_label;
			shim.shim_label |= MPLS_BOS_MASK;
			op =  ((struct rt_mpls *)rt->rt_llinfo)->mpls_operation;
		}
		if (op != MPLS_OP_PUSH) {
			m_freem(m);
			error = ENETUNREACH;
			goto out;
		}
		/* Either propagate the IP TTL into the label or use the
		 * configured default. */
		if (mpls_mapttl_ip) {
			struct ip	*ip;
			ip = mtod(m, struct ip *);
			shim.shim_label |= htonl(ip->ip_ttl) & MPLS_TTL_MASK;
		} else
			shim.shim_label |= htonl(mpls_defttl) & MPLS_TTL_MASK;

		/* Reserve room for shim + (family, IPv4 address) record;
		 * the shim is written into the mbuf below this fragment. */
		off = sizeof(sa_family_t) + sizeof(in_addr_t);
		M_PREPEND(m, sizeof(shim) + off, M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		*mtod(m, sa_family_t *) = AF_INET;
		m_copyback(m, sizeof(sa_family_t), sizeof(in_addr_t),
		    (caddr_t)&((satosin(dst)->sin_addr)), M_NOWAIT);
		break;
#endif
	default:
		m_freem(m);
		error = EPFNOSUPPORT;
		goto out;
	}
/* * Ethernet output routine. * Encapsulate a packet of type family for the local net. * Use trailer local net encapsulation if enough data in first * packet leaves a multiple of 512 bytes of data in remainder. */ int ether_frameout( struct ifnet *ifp, struct mbuf **m, const struct sockaddr *ndest, const char *edst, const char *ether_type) { struct ether_header *eh; int hlen; /* link layer header length */ hlen = ETHER_HDR_LEN; /* * If a simplex interface, and the packet is being sent to our * Ethernet address or a broadcast address, loopback a copy. * XXX To make a simplex device behave exactly like a duplex * device, we should copy in the case of sending to our own * ethernet address (thus letting the original actually appear * on the wire). However, we don't do that here for security * reasons and compatibility with the original behavior. */ if ((ifp->if_flags & IFF_SIMPLEX) && ((*m)->m_flags & M_LOOP)) { if (lo_ifp) { if ((*m)->m_flags & M_BCAST) { struct mbuf *n = m_copy(*m, 0, (int)M_COPYALL); if (n != NULL) dlil_output(lo_ifp, ndest->sa_family, n, NULL, ndest, 0); } else { if (_ether_cmp(edst, ifnet_lladdr(ifp)) == 0) { dlil_output(lo_ifp, ndest->sa_family, *m, NULL, ndest, 0); return EJUSTRETURN; } } } } /* * Add local net header. If no space in first mbuf, * allocate another. */ M_PREPEND(*m, sizeof (struct ether_header), M_DONTWAIT); if (*m == 0) { return (EJUSTRETURN); } eh = mtod(*m, struct ether_header *); (void)memcpy(&eh->ether_type, ether_type, sizeof(eh->ether_type)); (void)memcpy(eh->ether_dhost, edst, ETHER_ADDR_LEN); ifnet_lladdr_copy_bytes(ifp, eh->ether_shost, ETHER_ADDR_LEN); return 0; }
/*
 * svc_vc_backchannel_reply - marshal and send an RPC reply over the
 * backchannel of a connected stream transport.
 *
 * The reply header (and, for successful replies, the caller-supplied
 * pre-encoded results in 'm') is XDR-encoded into a fresh mbuf chain,
 * a TCP-style record mark is prepended, and the record is written to
 * the socket stored in the transport's ct_data.  Returns FALSE only
 * when XDR encoding fails; a send error does not change the result.
 */
static bool_t
svc_vc_backchannel_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	struct ct_data *ct;
	XDR xdrs;
	struct mbuf *reply;
	bool_t ok;
	int error;

	/*
	 * Leave space for record mark.
	 */
	reply = m_gethdr(M_WAITOK, MT_DATA);
	reply->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, reply, XDR_ENCODE);

	ok = TRUE;
	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		/* Encode the header, then append the encoded results. */
		if (xdr_replymsg(&xdrs, msg))
			xdrmbuf_append(&xdrs, m);
		else
			ok = FALSE;
	} else {
		ok = xdr_replymsg(&xdrs, msg);
	}

	if (ok) {
		m_fixhdr(reply);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(reply, sizeof(uint32_t), M_WAITOK);
		*mtod(reply, uint32_t *) =
			htonl(0x80000000 | (reply->m_pkthdr.len
				- sizeof(uint32_t)));
		sx_xlock(&xprt->xp_lock);
		ct = (struct ct_data *)xprt->xp_p2;
		if (ct != NULL)
			error = sosend(ct->ct_socket, NULL, NULL, reply,
			    NULL, 0, curthread);
		else
			error = EPIPE;
		sx_xunlock(&xprt->xp_lock);
		if (!error) {
			ok = TRUE;
		}
	} else {
		m_freem(reply);
	}

	XDR_DESTROY(&xdrs);

	return (ok);
}
/*
 * ether_addheader - prepend an Ethernet (or 802.1Q) header to *m.
 *
 * VLAN-tagged packets are handled in one of two ways: when the parent
 * interface offers working hardware tagging, the tag and priority are
 * stashed in the packet header for the driver and a plain Ethernet
 * header is added; otherwise a full ether_vlan_header is built in
 * software.  Returns 0 on success, -1 when the mbuf chain was lost
 * while prepending (M_PREPEND frees it on failure).
 */
static inline int
ether_addheader(struct mbuf **m, struct ifnet *ifp, u_int16_t etype,
    u_char *esrc, u_char *edst)
{
	struct ether_header *eh;

#if NVLAN > 0
	if ((*m)->m_flags & M_VLANTAG) {
		struct ifvlan *ifv = ifp->if_softc;
		struct ifnet *parent = ifv->ifv_p;

		/* should we use the tx tagging hw offload at all? */
		if ((parent->if_capabilities & IFCAP_VLAN_HWTAGGING) &&
		    (ifv->ifv_type == ETHERTYPE_VLAN)) {
			/* hardware tagging: record tag + priority and fall
			 * through to the plain Ethernet header below */
			(*m)->m_pkthdr.ether_vtag = ifv->ifv_tag +
			    ((*m)->m_pkthdr.pf.prio << EVL_PRIO_BITS);
			/* don't return, need to add regular ethernet header */
		} else {
			/* software tagging: build the full 802.1Q header */
			struct ether_vlan_header *evh;

			M_PREPEND(*m, sizeof(*evh), M_DONTWAIT);
			if (*m == NULL)
				return (-1);
			evh = mtod(*m, struct ether_vlan_header *);
			memcpy(evh->evl_dhost, edst, sizeof(evh->evl_dhost));
			memcpy(evh->evl_shost, esrc, sizeof(evh->evl_shost));
			evh->evl_proto = etype;
			evh->evl_encap_proto = htons(ifv->ifv_type);
			evh->evl_tag = htons(ifv->ifv_tag +
			    ((*m)->m_pkthdr.pf.prio << EVL_PRIO_BITS));
			(*m)->m_flags &= ~M_VLANTAG;
			return (0);
		}
	}
#endif /* NVLAN > 0 */

	M_PREPEND(*m, ETHER_HDR_LEN, M_DONTWAIT);
	if (*m == NULL)
		return (-1);
	eh = mtod(*m, struct ether_header *);
	eh->ether_type = etype;
	memcpy(eh->ether_dhost, edst, sizeof(eh->ether_dhost));
	memcpy(eh->ether_shost, esrc, sizeof(eh->ether_shost));

	return (0);
}
/*
 * svc_vc_reply - marshal and send an RPC reply on a stream transport.
 *
 * Encodes the reply header (plus the caller's pre-encoded results for
 * successful calls) into a new mbuf chain, prepends the RPC record
 * mark, and writes the record to the transport socket while keeping
 * the xp_snd_cnt / xp_snt_cnt accounting consistent.  Returns FALSE
 * only when XDR encoding fails; a sosend() error leaves the result
 * TRUE (the counters are simply rolled back).
 */
static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	XDR xdrs;
	struct mbuf *reply;
	bool_t ok;
	int error, len;

	/*
	 * Leave space for record mark.
	 */
	reply = m_gethdr(M_WAITOK, MT_DATA);
	reply->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, reply, XDR_ENCODE);

	ok = TRUE;
	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (xdr_replymsg(&xdrs, msg))
			xdrmbuf_append(&xdrs, m);
		else
			ok = FALSE;
	} else {
		ok = xdr_replymsg(&xdrs, msg);
	}

	if (!ok) {
		m_freem(reply);
		XDR_DESTROY(&xdrs);
		return (FALSE);
	}

	m_fixhdr(reply);

	/*
	 * Prepend a record marker containing the reply length.
	 */
	M_PREPEND(reply, sizeof(uint32_t), M_WAITOK);
	len = reply->m_pkthdr.len;
	*mtod(reply, uint32_t *) =
		htonl(0x80000000 | (len - sizeof(uint32_t)));

	/* Charge the record to the send count before sosend(). */
	atomic_add_32(&xprt->xp_snd_cnt, len);
	error = sosend(xprt->xp_socket, NULL, NULL, reply, NULL, 0,
	    curthread);
	if (!error) {
		atomic_add_rel_32(&xprt->xp_snt_cnt, len);
		if (seq)
			*seq = xprt->xp_snd_cnt;
		ok = TRUE;
	} else {
		/* Send failed: back the length out again. */
		atomic_subtract_32(&xprt->xp_snd_cnt, len);
	}

	XDR_DESTROY(&xdrs);

	return (ok);
}
/*
 * put_uint32 - prepend a 32-bit value, in network byte order, to the
 * front of the mbuf chain at *mp.  M_WAITOK means the prepend sleeps
 * rather than fail, so no NULL check is needed; the (possibly new)
 * head mbuf is written back through mp.
 */
static void
put_uint32(struct mbuf **mp, uint32_t v)
{
	uint32_t be = htonl(v);
	struct mbuf *m = *mp;

	M_PREPEND(m, sizeof(be), M_WAITOK);
	/* bcopy avoids assuming the new front is 4-byte aligned. */
	bcopy(&be, mtod(m, uint32_t *), sizeof(be));
	*mp = m;
}
/*
 * endEtherAddressForm - prepend an Ethernet MAC header to a packet.
 *
 * pMblk     - packet to encapsulate
 * pSrcAddr  - mBlk whose data is the 6-byte source MAC address
 * pDstAddr  - mBlk whose data is the 6-byte destination MAC address;
 *             its mBlkHdr.reserved field supplies the ethertype value
 * bcastFlag - when TRUE the link-level broadcast destination is used
 *
 * RETURNS: pMblk with the header prepended, or NULL if the prepend
 * failed (M_PREPEND frees the chain in that case).
 */
M_BLK_ID endEtherAddressForm
    (
    M_BLK_ID pMblk,	/* pointer to packet mBlk */
    M_BLK_ID pSrcAddr,	/* pointer to source address */
    M_BLK_ID pDstAddr,	/* pointer to destination address */
    BOOL bcastFlag	/* use link-level broadcast? */
    )
    {
    USHORT *pDst;
    USHORT *pSrc;

    M_PREPEND(pMblk, SIZEOF_ETHERHEADER, M_DONTWAIT);

    /*
     * This routine has been optimized somewhat in order to avoid
     * the use of bcopy().  On some architectures, a bcopy() could
     * result in a call into (allegedly) optimized architecture-
     * specific routines.  This may be fine for copying large chunks
     * of data, but we're only copying 6 bytes.  It's simpler just
     * to open code some 16-bit assignments.  The compiler would be
     * hard-pressed to produce sub-optimal code for this, and it
     * avoids at least one function call (possibly several).
     *
     * NOTE(review): the USHORT stores assume m_data and the address
     * buffers are at least 2-byte aligned — confirm that holds for
     * every caller on strict-alignment targets.
     */

    if (pMblk != NULL)
        {
        pDst = (USHORT *)pMblk->m_data;
        if (bcastFlag)
            {
            /* ff:ff:ff:ff:ff:ff broadcast destination */
            pDst[0] = 0xFFFF;
            pDst[1] = 0xFFFF;
            pDst[2] = 0xFFFF;
            }
        else
            {
            pSrc = (USHORT *)pDstAddr->m_data;
            pDst[0] = pSrc[0];
            pDst[1] = pSrc[1];
            pDst[2] = pSrc[2];
            }

        /* Advance to the source address field, fill it in. */
        pDst += 3;
        pSrc = (USHORT *)pSrcAddr->m_data;
        pDst[0] = pSrc[0];
        pDst[1] = pSrc[1];
        pDst[2] = pSrc[2];

        /* Ethertype travels in the destination mBlk's reserved field. */
        ((struct ether_header *)pMblk->m_data)->ether_type =
            pDstAddr->mBlkHdr.reserved;
        }

    return(pMblk);
    }
/*
 * mveth_tx - build and (below this fragment) transmit a test packet.
 *
 * data/len  - payload to send
 * nbufs     - layout selector: 1 = header copied inline ahead of the
 *             payload in one buffer; 2 = payload copied first, header
 *             prepended afterwards via M_PREPEND; anything else fails.
 *
 * NOTE: fragment is truncated in this view — the 'bail' label, the
 * actual transmit and the use of 'rval' are below the visible region.
 */
int
mveth_tx(struct mv64340_private *mp, char *data, int len, int nbufs)
{
int			rval = -1,l;
char		*p;
struct mbuf	*m;
char		*emsg = 0;

	rtems_bsdnet_semaphore_obtain();

	MGETHDR(m, M_WAIT, MT_DATA);
	if ( !m ) {
		emsg="Unable to allocate header\n";
		goto bail;
	}
	MCLGET(m, M_WAIT);
	if ( !(m->m_flags & M_EXT) ) {
		m_freem(m);
		emsg="Unable to allocate cluster\n";
		goto bail;
	}

	p = mtod(m, char *);
	l = 0;
	switch (nbufs) {
		case 3:
		default:
			emsg="nbufs arg must be 1..3\n";
			goto bail;

		case 1:
			/* Header copied inline, then FALLTHROUGH to copy
			 * the payload right behind it. */
			l += sizeof(BcHeader);
			memcpy(p, &BcHeader, sizeof(BcHeader));
			p += sizeof(BcHeader);

		case 2:
			memcpy(p,data,len);
			l += len;
			m->m_len = m->m_pkthdr.len = l;
			if ( 2 == nbufs ) {
				/* Header goes on via a separate prepend. */
				M_PREPEND(m, sizeof (BcHeader), M_WAIT);
				if (!m) {
					emsg = "Unable to prepend\n";
					goto bail;
				}
				p = mtod(m, char*);
				memcpy(p,&BcHeader,sizeof(BcHeader));
				l += sizeof(BcHeader);
			}
			break;
	}
/*
 * Unreliable transmission of an mbuf chain to the netdump server
 * Note: can't handle fragmentation; fails if the packet is larger than
 *	nd_ifp->if_mtu after adding the UDP/IP headers
 *
 * Parameters:
 *	m	mbuf chain
 *
 * Returns:
 *	int	see errno.h, 0 for success
 *
 * Checksum strategy: the leading bytes are first treated as a
 * udpiphdr overlay (ui_x1 zeroed, ui_pr/ui_len acting as the UDP
 * pseudo-header) and the UDP checksum is computed over the whole
 * packet; only then is the real IP header written over the front and
 * ip_sum computed over the (option-less) header alone.
 */
static int
netdump_udp_output(struct mbuf *m)
{
	struct udpiphdr *ui;
	struct ip *ip;

	MPASS(nd_ifp != NULL);

	M_PREPEND(m, sizeof(struct udpiphdr), M_NOWAIT);
	if (m == NULL) {
		printf("%s: out of mbufs\n", __func__);
		return (ENOBUFS);
	}

	/* No fragmentation support: refuse oversized datagrams. */
	if (m->m_pkthdr.len > nd_ifp->if_mtu) {
		printf("netdump_udp_output: Packet is too big: %d > MTU %u\n",
		    m->m_pkthdr.len, nd_ifp->if_mtu);
		m_freem(m);
		return (ENOBUFS);
	}

	ui = mtod(m, struct udpiphdr *);
	bzero(ui->ui_x1, sizeof(ui->ui_x1));
	ui->ui_pr = IPPROTO_UDP;
	ui->ui_len = htons(m->m_pkthdr.len - sizeof(struct ip));
	ui->ui_ulen = ui->ui_len;
	ui->ui_src = nd_client;
	ui->ui_dst = nd_server;
	/* Use this src port so that the server can connect() the socket */
	ui->ui_sport = htons(NETDUMP_ACKPORT);
	ui->ui_dport = htons(nd_server_port);
	ui->ui_sum = 0;
	/* A computed checksum of 0 must be transmitted as all-ones. */
	if ((ui->ui_sum = in_cksum(m, m->m_pkthdr.len)) == 0)
		ui->ui_sum = 0xffff;

	/* Now overwrite the overlay with the real IPv4 header. */
	ip = mtod(m, struct ip *);
	ip->ip_v = IPVERSION;
	ip->ip_hl = sizeof(struct ip) >> 2;
	ip->ip_tos = 0;
	ip->ip_len = htons(m->m_pkthdr.len);
	ip->ip_id = 0;
	ip->ip_off = htons(IP_DF);
	ip->ip_ttl = 255;
	ip->ip_sum = 0;
	ip->ip_sum = in_cksum(m, sizeof(struct ip));

	return (netdump_ether_output(m, nd_ifp, nd_gw_mac, ETHERTYPE_IP));
}
/*
 * wep_encap - add WEP privacy headers appropriate for the specified key.
 *
 * Prepends the 4-byte WEP IV + KeyID field, sliding the 802.11 header
 * down to make room, and selects the next IV.  Returns 0 to have the
 * caller drop the frame on allocation failure.
 *
 * NOTE: fragment is truncated in this view — the code that writes the
 * chosen IV into the frame and the success return are not visible.
 */
static int
wep_encap(struct ieee80211_key *k, struct mbuf *m, uint8_t keyid)
{
	struct wep_ctx *ctx = k->wk_private;
	struct ieee80211com *ic = ctx->wc_ic;
	uint32_t iv;
	uint8_t *ivp;
	int hdrlen;

	hdrlen = ieee80211_hdrspace(ic, mtod(m, void *));

	/*
	 * Copy down 802.11 header and add the IV + KeyID.
	 * M_PREPEND frees the chain and yields NULL on failure.
	 */
	M_PREPEND(m, wep.ic_header, MB_DONTWAIT);
	if (m == NULL)
		return 0;
	ivp = mtod(m, uint8_t *);
	ovbcopy(ivp + wep.ic_header, ivp, hdrlen);
	ivp += hdrlen;

	/*
	 * XXX
	 * IV must not duplicate during the lifetime of the key.
	 * But no mechanism to renew keys is defined in IEEE 802.11
	 * for WEP.  And the IV may be duplicated at other stations
	 * because the session key itself is shared.  So we use a
	 * pseudo random IV for now, though it is not the right way.
	 *
	 * NB: Rather than use a strictly random IV we select a
	 * random one to start and then increment the value for
	 * each frame.  This is an explicit tradeoff between
	 * overhead and security.  Given the basic insecurity of
	 * WEP this seems worthwhile.
	 */

	/*
	 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir:
	 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255
	 * (these weak IVs leak key bytes to the FMS attack).
	 */
	iv = ctx->wc_iv;
	if ((iv & 0xff00) == 0xff00) {
		int B = (iv & 0xff0000) >> 16;
		if (3 <= B && B < 16)
			iv += 0x0100;
	}
/*
 * in_gif_output - encapsulate an outbound packet in the tunnel's IPv4
 * header and hand it to ip_output().
 *
 * For ETHERIP on strict-alignment machines two extra pad bytes are
 * prepended so the inner headers come out aligned; the pad is then
 * trimmed based on the resulting data-pointer alignment.
 * Returns 0 or an errno; the mbuf is consumed on every path.
 */
int
in_gif_output(struct ifnet *ifp, struct mbuf *m, int proto, uint8_t ecn)
{
	GIF_RLOCK_TRACKER;
	struct gif_softc *sc = ifp->if_softc;
	struct ip *ip;
	int len;

	/* prepend new IP header */
	len = sizeof(struct ip);
#ifndef __NO_STRICT_ALIGNMENT
	if (proto == IPPROTO_ETHERIP)
		len += ETHERIP_ALIGN;
#endif
	M_PREPEND(m, len, M_NOWAIT);
	if (m == NULL)
		return (ENOBUFS);
#ifndef __NO_STRICT_ALIGNMENT
	if (proto == IPPROTO_ETHERIP) {
		/* 'len' is reused here for the current misalignment. */
		len = mtod(m, vm_offset_t) & 3;
		KASSERT(len == 0 || len == ETHERIP_ALIGN,
		    ("in_gif_output: unexpected misalignment"));
		m->m_data += len;
		/* NOTE(review): m_data advances by 'len' but m_len always
		 * shrinks by ETHERIP_ALIGN, even when len == 0 — verify
		 * this asymmetry is intended for the already-aligned case. */
		m->m_len -= ETHERIP_ALIGN;
	}
#endif
	ip = mtod(m, struct ip *);

	/* Tunnel must still be configured for IPv4 encapsulation. */
	GIF_RLOCK(sc);
	if (sc->gif_family != AF_INET) {
		m_freem(m);
		GIF_RUNLOCK(sc);
		return (ENETDOWN);
	}
	bcopy(sc->gif_iphdr, ip, sizeof(struct ip));
	GIF_RUNLOCK(sc);

	ip->ip_p = proto;
	/* version will be set in ip_output() */
	ip->ip_ttl = V_ip_gif_ttl;
	ip->ip_len = htons(m->m_pkthdr.len);
	ip->ip_tos = ecn;

	return (ip_output(m, NULL, NULL, 0, NULL, NULL));
}
static int natm_usr_send(struct socket *so, int flags, struct mbuf *m, struct mbuf *nam, struct mbuf *control) { struct natmpcb *npcb; struct atm_pseudohdr *aph; int error = 0; int s = SPLSOFTNET(); int proto = so->so_proto->pr_protocol; npcb = (struct natmpcb *) so->so_pcb; if (npcb == NULL) { error = EINVAL; goto out; } if (control && control->m_len) { m_freem(control); m_freem(m); error = EINVAL; goto out; } /* * send the data. we must put an atm_pseudohdr on first */ M_PREPEND(m, sizeof(*aph), M_WAITOK); if (m == NULL) { error = ENOBUFS; goto out; } aph = mtod(m, struct atm_pseudohdr *); ATM_PH_VPI(aph) = npcb->npcb_vpi; ATM_PH_SETVCI(aph, npcb->npcb_vci); ATM_PH_FLAGS(aph) = (proto == PROTO_NATMAAL5) ? ATM_PH_AAL5 : 0; error = atm_output(npcb->npcb_ifp, m, NULL, NULL); out: splx(s); return (error); }
int ddp_output( struct mbuf *m, struct socket *so) { struct ddpehdr *deh; struct ddpcb *ddp = sotoddpcb( so ); #ifdef MAC mac_create_mbuf_from_socket(so, m); #endif M_PREPEND( m, sizeof( struct ddpehdr ), M_TRYWAIT ); deh = mtod( m, struct ddpehdr *); deh->deh_pad = 0; deh->deh_hops = 0; deh->deh_len = m->m_pkthdr.len; deh->deh_dnet = ddp->ddp_fsat.sat_addr.s_net; deh->deh_dnode = ddp->ddp_fsat.sat_addr.s_node; deh->deh_dport = ddp->ddp_fsat.sat_port; deh->deh_snet = ddp->ddp_lsat.sat_addr.s_net; deh->deh_snode = ddp->ddp_lsat.sat_addr.s_node; deh->deh_sport = ddp->ddp_lsat.sat_port; /* * The checksum calculation is done after all of the other bytes have * been filled in. */ if ( ddp_cksum ) { deh->deh_sum = at_cksum( m, sizeof( int )); } else { deh->deh_sum = 0; } deh->deh_bytes = htonl( deh->deh_bytes ); #ifdef NETATALK_DEBUG printf ("ddp_output: from %d.%d:%d to %d.%d:%d\n", ntohs(deh->deh_snet), deh->deh_snode, deh->deh_sport, ntohs(deh->deh_dnet), deh->deh_dnode, deh->deh_dport); #endif return( ddp_route( m, &ddp->ddp_route )); }
/*
 * ieee1394_fragment - split (if necessary) an outgoing IEEE 1394
 * datagram into link-layer fragments of at most 'maxsize' bytes.
 *
 * When the packet plus the unfragmented encapsulation header fits in
 * one fragment, only the unfragmented header is prepended and the
 * single mbuf chain is returned.
 *
 * NOTE: fragment is truncated in this view — the multi-fragment path
 * and the 'bad' error label are below the visible region.
 */
struct mbuf *
ieee1394_fragment(struct ifnet *ifp, struct mbuf *m0, int maxsize,
    uint16_t etype)
{
	struct ieee1394com *ic = (struct ieee1394com *)ifp;
	int totlen, fraglen, off;
	struct mbuf *m, **mp;
	struct ieee1394_fraghdr *ifh;
	struct ieee1394_unfraghdr *iuh;

	totlen = m0->m_pkthdr.len;
	/* Single-fragment fast path: unfragmented header only. */
	if (totlen + sizeof(struct ieee1394_unfraghdr) <= maxsize) {
		M_PREPEND(m0, sizeof(struct ieee1394_unfraghdr), M_DONTWAIT);
		if (m0 == NULL)
			goto bad;
		iuh = mtod(m0, struct ieee1394_unfraghdr *);
		iuh->iuh_ft = 0;	/* fragment type: unfragmented */
		iuh->iuh_etype = etype;
		return m0;
	}
int ddp_output(struct mbuf *m, ...) { struct ddpcb *ddp; struct ddpehdr *deh; va_list ap; va_start(ap, m); ddp = va_arg(ap, struct ddpcb *); va_end(ap); M_PREPEND( m, sizeof( struct ddpehdr ), M_DONTWAIT ); if (!m) return (ENOBUFS); deh = mtod( m, struct ddpehdr *); deh->deh_pad = 0; deh->deh_hops = 0; deh->deh_len = m->m_pkthdr.len; deh->deh_dnet = ddp->ddp_fsat.sat_addr.s_net; deh->deh_dnode = ddp->ddp_fsat.sat_addr.s_node; deh->deh_dport = ddp->ddp_fsat.sat_port; deh->deh_snet = ddp->ddp_lsat.sat_addr.s_net; deh->deh_snode = ddp->ddp_lsat.sat_addr.s_node; deh->deh_sport = ddp->ddp_lsat.sat_port; /* * The checksum calculation is done after all of the other bytes have * been filled in. */ if ( ddp_cksum ) { deh->deh_sum = at_cksum( m, sizeof( int )); } else { deh->deh_sum = 0; } deh->deh_bytes = htonl( deh->deh_bytes ); return( ddp_route( m, &ddp->ddp_route )); }
/*
 * vlan_ether_ptap - tap a frame to BPF with its 802.1Q header
 * reconstructed from the given tag.
 *
 * The Ethernet header is peeled off the mbuf (m_adj) and rebuilt on
 * the stack as an ether_vlan_header carrying 'vlantag'; bpf_ptap()
 * then sees that header as a prefix to the remaining payload.
 * Afterwards M_PREPEND moves m_data back over the original header
 * bytes, which relies on m_adj/M_PREPEND not having disturbed them
 * (see the XXX below).
 */
void
vlan_ether_ptap(struct bpf_if *bp, struct mbuf *m, uint16_t vlantag)
{
	const struct ether_header *eh;
	struct ether_vlan_header evh;

	KASSERT(m->m_len >= ETHER_HDR_LEN,
		("ether header is not contiguous!"));

	/* 'eh' keeps pointing at the original header bytes, which
	 * m_adj() leaves in place (it only advances m_data). */
	eh = mtod(m, const struct ether_header *);
	m_adj(m, ETHER_HDR_LEN);

	/* Rebuild dst+src (2 * 6 bytes), then splice in the VLAN tag. */
	bcopy(eh, &evh, 2 * ETHER_ADDR_LEN);
	evh.evl_encap_proto = htons(ETHERTYPE_VLAN);
	evh.evl_tag = htons(vlantag);
	evh.evl_proto = eh->ether_type;
	bpf_ptap(bp, m, &evh, ETHER_HDR_LEN + EVL_ENCAPLEN);

	/* XXX assumes data was left intact */
	M_PREPEND(m, ETHER_HDR_LEN, MB_WAIT);
}
/*
 * sscop_send_upper - deliver an SSCOP signal (plus optional data) to
 * the netgraph node connected above, with flow-control and statistics
 * bookkeeping.
 *
 * For DATA.indication with flow control enabled the receive window is
 * opened by one message first.  The signal/argument pair is carried in
 * a struct sscop_arg prepended to the data (or in a fresh mbuf when
 * there is no data).  Allocation failures drop the signal silently.
 */
static void
sscop_send_upper(struct sscop *sscop, void *p, enum sscop_aasig sig,
    struct SSCOP_MBUF_T *m, u_int arg)
{
	node_p node = (node_p)p;
	struct priv *priv = NG_NODE_PRIVATE(node);
	struct sscop_arg *sa;
	int error;

	if (sig == SSCOP_DATA_indication && priv->flow)
		sscop_window(priv->sscop, 1);

	/* No upper hook: account for the drop and bail out. */
	if (priv->upper == NULL) {
		if (m != NULL)
			m_freem(m);
		priv->stats.aa_dropped++;
		return;
	}

	priv->stats.aa_signals++;
	if (sig == SSCOP_DATA_indication)
		priv->stats.data_delivered++;

	if (m == NULL) {
		/* Signal only: allocate an mbuf for the argument. */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL)
			return;
		m->m_pkthdr.len = m->m_len = sizeof(struct sscop_arg);
	} else {
		M_PREPEND(m, sizeof(struct sscop_arg), M_NOWAIT);
		if (m == NULL)
			return;
	}

	sa = mtod(m, struct sscop_arg *);
	sa->sig = sig;
	sa->arg = arg;

	NG_SEND_DATA_ONLY(error, priv->upper, m);
}
/*
 * ddp_pru_send - PRU_SEND handler for DDP sockets.
 *
 * Unless the socket asked for header-included mode (DDPFLG_HDRINCL),
 * room for a DDP header is prepended here; the header fields are
 * filled in further below (not visible in this truncated fragment).
 */
int
ddp_pru_send(struct socket *so, int flags, struct mbuf *m,
	     struct sockaddr *addr, struct mbuf *control, struct proc *p)
{
	at_ddp_t	*ddp = NULL;
	struct atpcb	*pcb = (struct atpcb *)((so)->so_pcb);

	if (pcb == NULL)
		return (EINVAL);

	/*
	 * Set type to MSG_DATA. Otherwise looped back packet is not
	 * recognized by atp_input() and possibly other protocols.
	 */
	MCHTYPE(m, MSG_DATA);

	if (!(pcb->ddp_flags & DDPFLG_HDRINCL)) {
		/* prepend a DDP header */
		/* NOTE(review): no NULL check after M_PREPEND — relies
		 * on M_WAIT never failing on this platform; confirm. */
		M_PREPEND(m, DDP_X_HDR_SIZE, M_WAIT);
		ddp = mtod(m, at_ddp_t *);
	}
/* * sco_send(pcb, mbuf) * * Send data on SCO pcb. * * Gross hackage, we just output the packet directly onto the unit queue. * This will work fine for one channel per unit, but for more channels it * really needs fixing. We set the context so that when the packet is sent, * we can drop a record from the socket buffer. */ int sco_send(struct sco_pcb *pcb, struct mbuf *m) { hci_scodata_hdr_t *hdr; int plen; if (pcb->sp_link == NULL) { m_freem(m); return EINVAL; } plen = m->m_pkthdr.len; DPRINTFN(10, "%d bytes\n", plen); /* * This is a temporary limitation, as USB devices cannot * handle SCO packet sizes that are not an integer number * of Isochronous frames. See ubt(4) */ if (plen != pcb->sp_mtu) { m_freem(m); return EMSGSIZE; } M_PREPEND(m, sizeof(hci_scodata_hdr_t), M_DONTWAIT); if (m == NULL) return ENOMEM; hdr = mtod(m, hci_scodata_hdr_t *); hdr->type = HCI_SCO_DATA_PKT; hdr->con_handle = htole16(pcb->sp_link->hl_handle); hdr->length = plen; pcb->sp_pending++; M_SETCTX(m, pcb->sp_link); hci_output_sco(pcb->sp_link->hl_unit, m); return 0; }
static int natm_usr_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *p) { struct natmpcb *npcb; struct atm_pseudohdr *aph; int error = 0; int proto = so->so_proto->pr_protocol; npcb = (struct natmpcb *)so->so_pcb; KASSERT(npcb != NULL, ("natm_usr_send: npcb == NULL")); NATM_LOCK(); if (control && control->m_len) { NATM_UNLOCK(); m_freem(control); m_freem(m); return (EINVAL); } /* * Send the data. We must put an atm_pseudohdr on first. */ M_PREPEND(m, sizeof(*aph), M_DONTWAIT); if (m == NULL) { NATM_UNLOCK(); m_freem(control); return (ENOBUFS); } aph = mtod(m, struct atm_pseudohdr *); ATM_PH_VPI(aph) = npcb->npcb_vpi; ATM_PH_SETVCI(aph, npcb->npcb_vci); ATM_PH_FLAGS(aph) = (proto == PROTO_NATMAAL5) ? ATM_PH_AAL5 : 0; error = atm_output(npcb->npcb_ifp, m, NULL, NULL); NATM_UNLOCK(); return (error); }
/*
 * udp6_output - send one UDP datagram on an IPv6 socket.
 *
 * Resolves the destination (either the explicit addr6 or the PCB's
 * connected address), picks a source address/port, prepends room for
 * the IPv6 + UDP headers, fills in the UDP header and checksum, and
 * hands the packet to ip6_output().  IPv4-mapped destinations flip
 * 'af' to AF_INET, which this function then rejects (EAFNOSUPPORT) —
 * the mapped-address path is not implemented here.
 * Returns 0 or an errno; 'm' (and 'control') are consumed.
 */
int
udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6,
	    struct mbuf *control, struct thread *td)
{
	u_int32_t ulen = m->m_pkthdr.len;
	u_int32_t plen = sizeof(struct udphdr) + ulen;
	struct ip6_hdr *ip6;
	struct udphdr *udp6;
	struct in6_addr *laddr, *faddr;
	u_short fport;
	int error = 0;
	struct ip6_pktopts opt, *stickyopt = in6p->in6p_outputopts;
	int priv;
	int af = AF_INET6, hlen = sizeof(struct ip6_hdr);
	int flags;
	struct sockaddr_in6 tmp;

	priv = !priv_check(td, PRIV_ROOT); /* 1 if privileged, 0 if not */

	if (control) {
		/* Per-packet options temporarily replace the sticky ones;
		 * restored at releaseopt below. */
		if ((error = ip6_setpktoptions(control, &opt,
		    in6p->in6p_outputopts, IPPROTO_UDP, priv)) != 0)
			goto release;
		in6p->in6p_outputopts = &opt;
	}

	if (addr6) {
		/*
		 * IPv4 version of udp_output calls in_pcbconnect in this case,
		 * which needs splnet and affects performance.
		 * Since we saw no essential reason for calling in_pcbconnect,
		 * we get rid of such kind of logic, and call in6_selectsrc
		 * and in6_pcbsetport in order to fill in the local address
		 * and the local port.
		 */
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr6;

		if (sin6->sin6_port == 0) {
			error = EADDRNOTAVAIL;
			goto release;
		}

		if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr)) {
			/* how about ::ffff:0.0.0.0 case? */
			error = EISCONN;
			goto release;
		}

		if (!prison_remote_ip(td, addr6)) {
			error = EAFNOSUPPORT; /* IPv4 only jail */
			goto release;
		}

		/* protect *sin6 from overwrites */
		tmp = *sin6;
		sin6 = &tmp;

		faddr = &sin6->sin6_addr;
		fport = sin6->sin6_port; /* allow 0 port */
		if (IN6_IS_ADDR_V4MAPPED(faddr)) {
			if ((in6p->in6p_flags & IN6P_IPV6_V6ONLY)) {
				/*
				 * I believe we should explicitly discard the
				 * packet when mapped addresses are disabled,
				 * rather than send the packet as an IPv6 one.
				 * If we chose the latter approach, the packet
				 * might be sent out on the wire based on the
				 * default route, the situation which we'd
				 * probably want to avoid.
				 * (20010421 [email protected])
				 */
				error = EINVAL;
				goto release;
			} else
				af = AF_INET;
		}

		/* KAME hack: embed scopeid */
		if (in6_embedscope(&sin6->sin6_addr, sin6, in6p, NULL) != 0) {
			error = EINVAL;
			goto release;
		}

		if (!IN6_IS_ADDR_V4MAPPED(faddr)) {
			laddr = in6_selectsrc(sin6, in6p->in6p_outputopts,
					      in6p->in6p_moptions,
					      &in6p->in6p_route,
					      &in6p->in6p_laddr, &error, NULL);
		} else
			laddr = &in6p->in6p_laddr;	/* XXX */
		if (laddr == NULL) {
			if (error == 0)
				error = EADDRNOTAVAIL;
			goto release;
		}
		/* Bind an ephemeral local port on first use. */
		if (in6p->in6p_lport == 0 &&
		    (error = in6_pcbsetport(laddr, in6p, td)) != 0)
			goto release;
	} else {
		/* No explicit destination: the socket must be connected. */
		if (IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr)) {
			error = ENOTCONN;
			goto release;
		}
		if (IN6_IS_ADDR_V4MAPPED(&in6p->in6p_faddr)) {
			if ((in6p->in6p_flags & IN6P_IPV6_V6ONLY)) {
				/*
				 * XXX: this case would happen when the
				 * application sets the V6ONLY flag after
				 * connecting the foreign address.
				 * Such applications should be fixed,
				 * so we bark here.
				 */
				log(LOG_INFO, "udp6_output: IPV6_V6ONLY "
				    "option was set for a connected socket\n");
				error = EINVAL;
				goto release;
			} else
				af = AF_INET;
		}
		laddr = &in6p->in6p_laddr;
		faddr = &in6p->in6p_faddr;
		fport = in6p->in6p_fport;
	}

	if (af == AF_INET)
		hlen = sizeof(struct ip);

	/*
	 * Calculate data length and get a mbuf
	 * for UDP and IP6 headers.
	 */
	M_PREPEND(m, hlen + sizeof(struct udphdr), MB_DONTWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto release;
	}

	/*
	 * Stuff checksum and output datagram.
	 */
	udp6 = (struct udphdr *)(mtod(m, caddr_t) + hlen);
	udp6->uh_sport = in6p->in6p_lport; /* lport is always set in the PCB */
	udp6->uh_dport = fport;
	/* A UDP length that overflows 16 bits is sent as 0 (jumbogram). */
	if (plen <= 0xffff)
		udp6->uh_ulen = htons((u_short)plen);
	else
		udp6->uh_ulen = 0;
	udp6->uh_sum = 0;

	switch (af) {
	case AF_INET6:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_flow	= in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK;
		ip6->ip6_vfc	&= ~IPV6_VERSION_MASK;
		ip6->ip6_vfc	|= IPV6_VERSION;
#if 0		/* ip6_plen will be filled in ip6_output. */
		ip6->ip6_plen	= htons((u_short)plen);
#endif
		ip6->ip6_nxt	= IPPROTO_UDP;
		ip6->ip6_hlim	= in6_selecthlim(in6p,
		    in6p->in6p_route.ro_rt ?
		    in6p->in6p_route.ro_rt->rt_ifp : NULL);
		ip6->ip6_src	= *laddr;
		ip6->ip6_dst	= *faddr;

		/* A computed checksum of 0 must be sent as all-ones. */
		if ((udp6->uh_sum = in6_cksum(m, IPPROTO_UDP,
				sizeof(struct ip6_hdr), plen)) == 0) {
			udp6->uh_sum = 0xffff;
		}

		flags = 0;

		udp6stat.udp6s_opackets++;
		error = ip6_output(m, in6p->in6p_outputopts, &in6p->in6p_route,
		    flags, in6p->in6p_moptions, NULL, in6p);
		break;
	case AF_INET:
		/* Mapped-address output is not supported here. */
		error = EAFNOSUPPORT;
		goto release;
	}
	goto releaseopt;

release:
	m_freem(m);

releaseopt:
	if (control) {
		/* Undo the temporary per-packet options. */
		ip6_clearpktopts(in6p->in6p_outputopts, -1);
		in6p->in6p_outputopts = stickyopt;
		m_freem(control);
	}
	return (error);
}
static void udp_send(netmsg_t msg) { struct socket *so = msg->send.base.nm_so; struct mbuf *m = msg->send.nm_m; struct sockaddr *dstaddr = msg->send.nm_addr; int pru_flags = msg->send.nm_flags; struct inpcb *inp = so->so_pcb; struct thread *td = msg->send.nm_td; int flags; struct udpiphdr *ui; int len = m->m_pkthdr.len; struct sockaddr_in *sin; /* really is initialized before use */ int error = 0, cpu; KKASSERT(msg->send.nm_control == NULL); logudp(send_beg, inp); if (inp == NULL) { error = EINVAL; goto release; } if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) { error = EMSGSIZE; goto release; } if (inp->inp_lport == 0) { /* unbound socket */ boolean_t forwarded; error = in_pcbbind(inp, NULL, td); if (error) goto release; /* * Need to call udp_send again, after this inpcb is * inserted into wildcard hash table. */ msg->send.base.lmsg.ms_flags |= MSGF_UDP_SEND; forwarded = udp_inswildcardhash(inp, &msg->send.base, 0); if (forwarded) { /* * The message is further forwarded, so we are * done here. */ logudp(send_inswildcard, inp); return; } } if (dstaddr != NULL) { /* destination address specified */ if (inp->inp_faddr.s_addr != INADDR_ANY) { /* already connected */ error = EISCONN; goto release; } sin = (struct sockaddr_in *)dstaddr; if (!prison_remote_ip(td, (struct sockaddr *)&sin)) { error = EAFNOSUPPORT; /* IPv6 only jail */ goto release; } } else { if (inp->inp_faddr.s_addr == INADDR_ANY) { /* no destination specified and not already connected */ error = ENOTCONN; goto release; } sin = NULL; } /* * Calculate data length and get a mbuf * for UDP and IP headers. */ M_PREPEND(m, sizeof(struct udpiphdr), M_NOWAIT); if (m == NULL) { error = ENOBUFS; goto release; } /* * Fill in mbuf with extended UDP header * and addresses and length put into network format. */ ui = mtod(m, struct udpiphdr *); bzero(ui->ui_x1, sizeof ui->ui_x1); /* XXX still needed? */ ui->ui_pr = IPPROTO_UDP; /* * Set destination address. 
*/ if (dstaddr != NULL) { /* use specified destination */ ui->ui_dst = sin->sin_addr; ui->ui_dport = sin->sin_port; } else { /* use connected destination */ ui->ui_dst = inp->inp_faddr; ui->ui_dport = inp->inp_fport; } /* * Set source address. */ if (inp->inp_laddr.s_addr == INADDR_ANY || IN_MULTICAST(ntohl(inp->inp_laddr.s_addr))) { struct sockaddr_in *if_sin; if (dstaddr == NULL) { /* * connect() had (or should have) failed because * the interface had no IP address, but the * application proceeded to call send() anyways. */ error = ENOTCONN; goto release; } /* Look up outgoing interface. */ error = in_pcbladdr_find(inp, dstaddr, &if_sin, td, 1); if (error) goto release; ui->ui_src = if_sin->sin_addr; /* use address of interface */ } else { ui->ui_src = inp->inp_laddr; /* use non-null bound address */ } ui->ui_sport = inp->inp_lport; KASSERT(inp->inp_lport != 0, ("inp lport should have been bound")); /* * Release the original thread, since it is no longer used */ if (pru_flags & PRUS_HELDTD) { lwkt_rele(td); pru_flags &= ~PRUS_HELDTD; } /* * Free the dest address, since it is no longer needed */ if (pru_flags & PRUS_FREEADDR) { kfree(dstaddr, M_SONAME); pru_flags &= ~PRUS_FREEADDR; } ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr)); /* * Set up checksum and output datagram. */ if (udpcksum) { ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr, htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP)); m->m_pkthdr.csum_flags = CSUM_UDP; m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); m->m_pkthdr.csum_thlen = sizeof(struct udphdr); } else {
int looutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct rtentry *rt) { pktqueue_t *pktq = NULL; struct ifqueue *ifq = NULL; int s, isr = -1; int csum_flags; size_t pktlen; MCLAIM(m, ifp->if_mowner); KASSERT(KERNEL_LOCKED_P()); if ((m->m_flags & M_PKTHDR) == 0) panic("looutput: no header mbuf"); if (ifp->if_flags & IFF_LOOPBACK) bpf_mtap_af(ifp, dst->sa_family, m); m->m_pkthdr.rcvif = ifp; if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) { m_freem(m); return (rt->rt_flags & RTF_BLACKHOLE ? 0 : rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH); } pktlen = m->m_pkthdr.len; ifp->if_opackets++; ifp->if_obytes += pktlen; #ifdef ALTQ /* * ALTQ on the loopback interface is just for debugging. It's * used only for loopback interfaces, not for a simplex interface. */ if ((ALTQ_IS_ENABLED(&ifp->if_snd) || TBR_IS_ENABLED(&ifp->if_snd)) && ifp->if_start == lostart) { struct altq_pktattr pktattr; int error; /* * If the queueing discipline needs packet classification, * do it before prepending the link headers. 
*/ IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr); M_PREPEND(m, sizeof(uint32_t), M_DONTWAIT); if (m == NULL) return (ENOBUFS); *(mtod(m, uint32_t *)) = dst->sa_family; s = splnet(); IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error); (*ifp->if_start)(ifp); splx(s); return (error); } #endif /* ALTQ */ m_tag_delete_nonpersistent(m); #ifdef MPLS if (rt != NULL && rt_gettag(rt) != NULL && rt_gettag(rt)->sa_family == AF_MPLS && (m->m_flags & (M_MCAST | M_BCAST)) == 0) { union mpls_shim msh; msh.s_addr = MPLS_GETSADDR(rt); if (msh.shim.label != MPLS_LABEL_IMPLNULL) { ifq = &mplsintrq; isr = NETISR_MPLS; } } if (isr != NETISR_MPLS) #endif switch (dst->sa_family) { #ifdef INET case AF_INET: csum_flags = m->m_pkthdr.csum_flags; KASSERT((csum_flags & ~(M_CSUM_IPv4|M_CSUM_UDPv4)) == 0); if (csum_flags != 0 && IN_LOOPBACK_NEED_CHECKSUM(csum_flags)) { ip_undefer_csum(m, 0, csum_flags); } m->m_pkthdr.csum_flags = 0; pktq = ip_pktq; break; #endif #ifdef INET6 case AF_INET6: csum_flags = m->m_pkthdr.csum_flags; KASSERT((csum_flags & ~M_CSUM_UDPv6) == 0); if (csum_flags != 0 && IN6_LOOPBACK_NEED_CHECKSUM(csum_flags)) { ip6_undefer_csum(m, 0, csum_flags); } m->m_pkthdr.csum_flags = 0; m->m_flags |= M_LOOP; pktq = ip6_pktq; break; #endif #ifdef IPX case AF_IPX: ifq = &ipxintrq; isr = NETISR_IPX; break; #endif #ifdef NETATALK case AF_APPLETALK: ifq = &atintrq2; isr = NETISR_ATALK; break; #endif default: printf("%s: can't handle af%d\n", ifp->if_xname, dst->sa_family); m_freem(m); return (EAFNOSUPPORT); } s = splnet(); if (__predict_true(pktq)) { int error = 0; if (__predict_true(pktq_enqueue(pktq, m, 0))) { ifp->if_ipackets++; ifp->if_ibytes += pktlen; } else { m_freem(m); error = ENOBUFS; } splx(s); return error; } if (IF_QFULL(ifq)) { IF_DROP(ifq); m_freem(m); splx(s); return (ENOBUFS); } IF_ENQUEUE(ifq, m); schednetisr(isr); ifp->if_ipackets++; ifp->if_ibytes += m->m_pkthdr.len; splx(s); return (0); }
/*
 * udp_output:
 *
 *	Build the UDP/IP header for an outgoing datagram on 'inp' and hand
 *	the result to ip_output().  Handles both UDP and UDP-Lite (partial
 *	checksum coverage), optional control messages (IP_SENDSRCADDR,
 *	IP_TOS), and the temporary bind/connect needed for unbound or
 *	unconnected sockets.
 *
 *	'm' (the payload) and 'control' are always consumed, on both the
 *	success and error paths.  'addr', when non-NULL, is the explicit
 *	destination (sockaddr_in); 'td' supplies credentials for binding
 *	and jail checks.  Returns 0 or an errno.
 */
static int
udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *td)
{
	struct udpiphdr *ui;
	int len = m->m_pkthdr.len;	/* payload length, before prepends */
	struct in_addr faddr, laddr;
	struct cmsghdr *cm;
	struct inpcbinfo *pcbinfo;
	struct sockaddr_in *sin, src;	/* src: IP_SENDSRCADDR override */
	int cscov_partial = 0;		/* UDP-Lite partial coverage in use */
	int error = 0;
	int ipflags;
	u_short fport, lport;
	int unlock_udbinfo;		/* UH_WLOCKED/UH_RLOCKED/UH_UNLOCKED */
	u_char tos;
	uint8_t pr;			/* IPPROTO_UDP or IPPROTO_UDPLITE */
	uint16_t cscov = 0;		/* UDP-Lite tx checksum coverage */

	/*
	 * udp_output() may need to temporarily bind or connect the current
	 * inpcb.  As such, we don't know up front whether we will need the
	 * pcbinfo lock or not.  Do any work to decide what is needed up
	 * front before acquiring any locks.
	 */
	if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
		if (control)
			m_freem(control);
		m_freem(m);
		return (EMSGSIZE);
	}

	/* sin_family == AF_INET later signals IP_SENDSRCADDR was given. */
	src.sin_family = 0;
	INP_RLOCK(inp);
	tos = inp->inp_ip_tos;
	if (control != NULL) {
		/*
		 * XXX: Currently, we assume all the optional information is
		 * stored in a single mbuf.
		 */
		if (control->m_next) {
			INP_RUNLOCK(inp);
			m_freem(control);
			m_freem(m);
			return (EINVAL);
		}
		/*
		 * Walk the cmsg chain by advancing the mbuf data pointer
		 * itself; cm is re-read at the top of each iteration, so
		 * the loop-increment expression uses the previous header.
		 */
		for (; control->m_len > 0;
		    control->m_data += CMSG_ALIGN(cm->cmsg_len),
		    control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
			cm = mtod(control, struct cmsghdr *);
			if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
			    || cm->cmsg_len > control->m_len) {
				error = EINVAL;
				break;
			}
			if (cm->cmsg_level != IPPROTO_IP)
				continue;
			switch (cm->cmsg_type) {
			case IP_SENDSRCADDR:
				/* Per-datagram source address override. */
				if (cm->cmsg_len !=
				    CMSG_LEN(sizeof(struct in_addr))) {
					error = EINVAL;
					break;
				}
				bzero(&src, sizeof(src));
				src.sin_family = AF_INET;
				src.sin_len = sizeof(src);
				src.sin_port = inp->inp_lport;
				src.sin_addr =
				    *(struct in_addr *)CMSG_DATA(cm);
				break;
			case IP_TOS:
				/* Per-datagram TOS override. */
				if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
					error = EINVAL;
					break;
				}
				tos = *(u_char *)CMSG_DATA(cm);
				break;
			default:
				error = ENOPROTOOPT;
				break;
			}
			if (error)
				break;
		}
		m_freem(control);
	}
	if (error) {
		INP_RUNLOCK(inp);
		m_freem(m);
		return (error);
	}

	/*
	 * Depending on whether or not the application has bound or connected
	 * the socket, we may have to do varying levels of work.  The optimal
	 * case is for a connected UDP socket, as a global lock isn't
	 * required at all.
	 *
	 * In order to decide which we need, we require stability of the
	 * inpcb binding, which we ensure by acquiring a read lock on the
	 * inpcb.  This doesn't strictly follow the lock order, so we play
	 * the trylock and retry game; note that we may end up with more
	 * conservative locks than required the second time around, so later
	 * assertions have to accept that.  Further analysis of the number of
	 * misses under contention is required.
	 *
	 * XXXRW: Check that hash locking update here is correct.
	 */
	pr = inp->inp_socket->so_proto->pr_protocol;
	pcbinfo = get_inpcbinfo(pr);
	sin = (struct sockaddr_in *)addr;
	if (sin != NULL &&
	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
		/*
		 * Completely unbound with an explicit destination: we will
		 * implicitly bind and insert into the hash, so we need the
		 * write locks.  Note the RUNLOCK/WLOCK window: the binding
		 * state is re-derived below under the stronger locks.
		 */
		INP_RUNLOCK(inp);
		INP_WLOCK(inp);
		INP_HASH_WLOCK(pcbinfo);
		unlock_udbinfo = UH_WLOCKED;
	} else if ((sin != NULL && (
	    (sin->sin_addr.s_addr == INADDR_ANY) ||
	    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
	    (inp->inp_laddr.s_addr == INADDR_ANY) ||
	    (inp->inp_lport == 0))) ||
	    (src.sin_family == AF_INET)) {
		/*
		 * Address selection or IP_SENDSRCADDR validation is needed,
		 * but no hash insertion: a read lock on the hash suffices.
		 */
		INP_HASH_RLOCK(pcbinfo);
		unlock_udbinfo = UH_RLOCKED;
	} else
		/* Fully bound and connected: fast path, no global lock. */
		unlock_udbinfo = UH_UNLOCKED;

	/*
	 * If the IP_SENDSRCADDR control message was specified, override the
	 * source address for this datagram.  Its use is invalidated if the
	 * address thus specified is incomplete or clobbers other inpcbs.
	 */
	laddr = inp->inp_laddr;
	lport = inp->inp_lport;
	if (src.sin_family == AF_INET) {
		INP_HASH_LOCK_ASSERT(pcbinfo);
		if ((lport == 0) ||
		    (laddr.s_addr == INADDR_ANY &&
		     src.sin_addr.s_addr == INADDR_ANY)) {
			error = EINVAL;
			goto release;
		}
		/* Validate the override against existing bindings. */
		error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
		    &laddr.s_addr, &lport, td->td_ucred);
		if (error)
			goto release;
	}

	/*
	 * If a UDP socket has been connected, then a local address/port will
	 * have been selected and bound.
	 *
	 * If a UDP socket has not been connected to, then an explicit
	 * destination address must be used, in which case a local
	 * address/port may not have been selected and bound.
	 */
	if (sin != NULL) {
		INP_LOCK_ASSERT(inp);
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			error = EISCONN;
			goto release;
		}

		/*
		 * Jail may rewrite the destination address, so let it do
		 * that before we use it.
		 */
		error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
		if (error)
			goto release;

		/*
		 * If a local address or port hasn't yet been selected, or if
		 * the destination address needs to be rewritten due to using
		 * a special INADDR_ constant, invoke in_pcbconnect_setup()
		 * to do the heavy lifting.  Once a port is selected, we
		 * commit the binding back to the socket; we also commit the
		 * binding of the address if in jail.
		 *
		 * If we already have a valid binding and we're not
		 * requesting a destination address rewrite, use a fast path.
		 */
		if (inp->inp_laddr.s_addr == INADDR_ANY ||
		    inp->inp_lport == 0 ||
		    sin->sin_addr.s_addr == INADDR_ANY ||
		    sin->sin_addr.s_addr == INADDR_BROADCAST) {
			INP_HASH_LOCK_ASSERT(pcbinfo);
			error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
			    &lport, &faddr.s_addr, &fport, NULL,
			    td->td_ucred);
			if (error)
				goto release;

			/*
			 * XXXRW: Why not commit the port if the address is
			 * !INADDR_ANY?
			 */
			/* Commit the local port if newly assigned. */
			if (inp->inp_laddr.s_addr == INADDR_ANY &&
			    inp->inp_lport == 0) {
				INP_WLOCK_ASSERT(inp);
				INP_HASH_WLOCK_ASSERT(pcbinfo);
				/*
				 * Remember addr if jailed, to prevent
				 * rebinding.
				 */
				if (prison_flag(td->td_ucred, PR_IP4))
					inp->inp_laddr = laddr;
				inp->inp_lport = lport;
				if (in_pcbinshash(inp) != 0) {
					/* Roll back on hash failure. */
					inp->inp_lport = 0;
					error = EAGAIN;
					goto release;
				}
				inp->inp_flags |= INP_ANONPORT;
			}
		} else {
			/* Fast path: destination fully specified. */
			faddr = sin->sin_addr;
			fport = sin->sin_port;
		}
	} else {
		/* No explicit destination: must already be connected. */
		INP_LOCK_ASSERT(inp);
		faddr = inp->inp_faddr;
		fport = inp->inp_fport;
		if (faddr.s_addr == INADDR_ANY) {
			error = ENOTCONN;
			goto release;
		}
	}

	/*
	 * Calculate data length and get a mbuf for UDP, IP, and possible
	 * link-layer headers.  Immediate slide the data pointer back forward
	 * since we won't use that space at this layer.
	 */
	M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_NOWAIT);
	if (m == NULL) {
		/* M_PREPEND frees the chain on failure. */
		error = ENOBUFS;
		goto release;
	}
	m->m_data += max_linkhdr;
	m->m_len -= max_linkhdr;
	m->m_pkthdr.len -= max_linkhdr;

	/*
	 * Fill in mbuf with extended UDP header and addresses and length put
	 * into network format.
	 */
	ui = mtod(m, struct udpiphdr *);
	bzero(ui->ui_x1, sizeof(ui->ui_x1));	/* XXX still needed? */
	ui->ui_pr = pr;
	ui->ui_src = laddr;
	ui->ui_dst = faddr;
	ui->ui_sport = lport;
	ui->ui_dport = fport;
	ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
	if (pr == IPPROTO_UDPLITE) {
		struct udpcb *up;
		uint16_t plen;

		up = intoudpcb(inp);
		cscov = up->u_txcslen;
		plen = (u_short)len + sizeof(struct udphdr);
		/* Coverage >= packet length means full coverage (encode 0). */
		if (cscov >= plen)
			cscov = 0;
		ui->ui_len = htons(plen);
		ui->ui_ulen = htons(cscov);
		/*
		 * For UDP-Lite, checksum coverage length of zero means
		 * the entire UDPLite packet is covered by the checksum.
		 */
		cscov_partial = (cscov == 0) ? 0 : 1;
	} else
		ui->ui_v = IPVERSION << 4;

	/*
	 * Set the Don't Fragment bit in the IP header.
	 */
	if (inp->inp_flags & INP_DONTFRAG) {
		struct ip *ip;

		ip = (struct ip *)&ui->ui_i;
		ip->ip_off |= htons(IP_DF);
	}

	ipflags = 0;
	if (inp->inp_socket->so_options & SO_DONTROUTE)
		ipflags |= IP_ROUTETOIF;
	if (inp->inp_socket->so_options & SO_BROADCAST)
		ipflags |= IP_ALLOWBROADCAST;
	if (inp->inp_flags & INP_ONESBCAST)
		ipflags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	/*
	 * Set up checksum and output datagram.  Partial-coverage UDP-Lite
	 * checksums are computed in software here; full checksums are
	 * deferred to hardware/ip_output via csum_flags.
	 */
	ui->ui_sum = 0;
	if (cscov_partial) {
		if (inp->inp_flags & INP_ONESBCAST)
			faddr.s_addr = INADDR_BROADCAST;
		/* A computed checksum of 0 is transmitted as all-ones. */
		if ((ui->ui_sum = in_cksum(m, sizeof(struct ip) + cscov)) == 0)
			ui->ui_sum = 0xffff;
	} else if (V_udp_cksum || !cscov_partial) {
		/*
		 * NOTE(review): !cscov_partial is always true in this
		 * branch, so full-coverage checksums are set up even when
		 * V_udp_cksum is disabled — presumably deliberate for
		 * UDP-Lite; confirm against udp(4) before changing.
		 */
		if (inp->inp_flags & INP_ONESBCAST)
			faddr.s_addr = INADDR_BROADCAST;
		ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
		    htons((u_short)len + sizeof(struct udphdr) + pr));
		m->m_pkthdr.csum_flags = CSUM_UDP;
		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
	}
	((struct ip *)ui)->ip_len = htons(sizeof(struct udpiphdr) + len);
	((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl;	/* XXX */
	((struct ip *)ui)->ip_tos = tos;		/* XXX */
	UDPSTAT_INC(udps_opackets);

	/* Drop the global hash lock before the (possibly long) ip_output. */
	if (unlock_udbinfo == UH_WLOCKED)
		INP_HASH_WUNLOCK(pcbinfo);
	else if (unlock_udbinfo == UH_RLOCKED)
		INP_HASH_RUNLOCK(pcbinfo);
	UDP_PROBE(send, NULL, inp, &ui->ui_i, inp, &ui->ui_u);
	error = ip_output(m, inp->inp_options, NULL, ipflags,
	    inp->inp_moptions, inp);
	if (unlock_udbinfo == UH_WLOCKED)
		INP_WUNLOCK(inp);
	else
		INP_RUNLOCK(inp);
	return (error);

release:
	/* Error path: release whatever locks were taken, then free 'm'. */
	if (unlock_udbinfo == UH_WLOCKED) {
		INP_HASH_WUNLOCK(pcbinfo);
		INP_WUNLOCK(inp);
	} else if (unlock_udbinfo == UH_RLOCKED) {
		INP_HASH_RUNLOCK(pcbinfo);
		INP_RUNLOCK(inp);
	} else
		INP_RUNLOCK(inp);
	m_freem(m);
	return (error);
}