/*
 * Remove hlen data at offset skip in the packet.  This is used by
 * the protocols to strip protocol headers and associated data (e.g. IV,
 * authenticator) on input.
 */
int
m_striphdr(struct mbuf *m, int skip, int hlen)
{
    struct mbuf *m1;
    int roff;

    /* Find beginning of header */
    m1 = m_getptr(m, skip, &roff);
    if (m1 == NULL)
        return (EINVAL);

    /* Remove the header and associated data from the mbuf. */
    if (roff == 0) {
        /* The header was at the beginning of the mbuf */
        newipsecstat.ips_input_front++;
        m_adj(m1, hlen);
        if ((m1->m_flags & M_PKTHDR) == 0)
            m->m_pkthdr.len -= hlen;
    } else if (roff + hlen >= m1->m_len) {
        struct mbuf *mo;

        /*
         * Part or all of the header is at the end of this mbuf,
         * so first let's remove the remainder of the header from
         * the beginning of the remainder of the mbuf chain, if any.
         */
        newipsecstat.ips_input_end++;
        if (roff + hlen > m1->m_len) {
            /* Adjust the next mbuf by the remainder */
            m_adj(m1->m_next, roff + hlen - m1->m_len);

            /* The second mbuf is guaranteed not to have a pkthdr... */
            m->m_pkthdr.len -= (roff + hlen - m1->m_len);
        }

        /* Now, let's unlink the mbuf chain for a second... */
        mo = m1->m_next;
        m1->m_next = NULL;

        /* ...and trim the end of the first part of the chain...sick */
        m_adj(m1, -(m1->m_len - roff));
        if ((m1->m_flags & M_PKTHDR) == 0)
            m->m_pkthdr.len -= (m1->m_len - roff);

        /* Finally, let's relink */
        m1->m_next = mo;
    } else {
        /*
         * The header lies in the "middle" of the mbuf; copy
         * the remainder of the mbuf down over the header.
         */
        newipsecstat.ips_input_middle++;
        bcopy(mtod(m1, u_char *) + roff + hlen,
            mtod(m1, u_char *) + roff,
            m1->m_len - (roff + hlen));
        m1->m_len -= hlen;
        m->m_pkthdr.len -= hlen;
    }
    return (0);
}
/*
 * Start output on the mpe interface.
 */
void
mpestart(struct ifnet *ifp)
{
    struct mbuf *m;
    struct sockaddr *sa = (struct sockaddr *)&mpedst;
    int s;
    sa_family_t af;
    struct rtentry *rt;

    for (;;) {
        s = splnet();
        IFQ_DEQUEUE(&ifp->if_snd, m);
        splx(s);

        if (m == NULL)
            return;

        af = *mtod(m, sa_family_t *);
        m_adj(m, sizeof(af));
        switch (af) {
        case AF_INET:
            bzero(sa, sizeof(struct sockaddr_in));
            satosin(sa)->sin_family = af;
            satosin(sa)->sin_len = sizeof(struct sockaddr_in);
            bcopy(mtod(m, caddr_t), &satosin(sa)->sin_addr,
                sizeof(in_addr_t));
            m_adj(m, sizeof(in_addr_t));
            break;
        default:
            m_freem(m);
            continue;
        }

        rt = rtalloc1(sa, RT_REPORT, 0);
        if (rt == NULL) {
            /* no route give up */
            m_freem(m);
            continue;
        }

#if NBPFILTER > 0
        if (ifp->if_bpf) {
            /* remove MPLS label before passing packet to bpf */
            m->m_data += sizeof(struct shim_hdr);
            m->m_len -= sizeof(struct shim_hdr);
            m->m_pkthdr.len -= sizeof(struct shim_hdr);
            bpf_mtap_af(ifp->if_bpf, af, m, BPF_DIRECTION_OUT);
            m->m_data -= sizeof(struct shim_hdr);
            m->m_len += sizeof(struct shim_hdr);
            m->m_pkthdr.len += sizeof(struct shim_hdr);
        }
#endif
        /* XXX lie, but mpls_output will only look at sa_family */
        sa->sa_family = AF_MPLS;

        mpls_output(rt->rt_ifp, m, sa, rt);
        RTFREE(rt);
    }
}
static int
mpls_send_frame(struct mbuf *m, struct ifnet *ifp, const struct rtentry *rt)
{
    union mpls_shim msh;
    int ret;

    msh.s_addr = MPLS_GETSADDR(rt);
    if (msh.shim.label == MPLS_LABEL_IMPLNULL ||
        (m->m_flags & (M_MCAST | M_BCAST))) {
        m_adj(m, sizeof(union mpls_shim));
        m->m_pkthdr.csum_flags = 0;
    }

    switch (ifp->if_type) {
    /* only these are supported for now */
    case IFT_ETHER:
    case IFT_TUNNEL:
    case IFT_LOOP:
#ifdef INET
        ret = ip_if_output(ifp, m, rt->rt_gateway, rt);
#else
        ret = if_output_lock(ifp, ifp, m, rt->rt_gateway, rt);
#endif
        return ret;
        break;
    default:
        return ENETUNREACH;
    }
    return 0;
}
struct mbuf *
xdrmbuf_getall(XDR *xdrs)
{
    struct mbuf *m0, *m;

    KASSERT(xdrs->x_ops == &xdrmbuf_ops && xdrs->x_op == XDR_DECODE,
        ("xdrmbuf_getall: invalid XDR stream"));

    m0 = (struct mbuf *) xdrs->x_base;
    m = (struct mbuf *) xdrs->x_private;
    if (m0 != m) {
        while (m0->m_next != m)
            m0 = m0->m_next;
        m0->m_next = NULL;
        xdrs->x_private = NULL;
    } else {
        xdrs->x_base = NULL;
        xdrs->x_private = NULL;
    }

    if (m)
        m_adj(m, xdrs->x_handy);
    else
        MGET(m, M_WAITOK, MT_DATA);
    return (m);
}
/*
 * Process a received Ethernet packet;
 * the packet is in the mbuf chain m without
 * the ether header, which is provided separately.
 */
void
ether_input(struct ifnet *ifp0, struct ether_header *eh, struct mbuf *m)
{
    struct ifqueue *inq;
    u_int16_t etype;
    int s, llcfound = 0;
    struct llc *l;
    struct arpcom *ac;
    struct ifnet *ifp = ifp0;
#if NTRUNK > 0
    int i = 0;
#endif
#if NPPPOE > 0
    struct ether_header *eh_tmp;
#endif

    m_cluncount(m, 1);

    /* mark incoming routing table */
    m->m_pkthdr.ph_rtableid = ifp->if_rdomain;

    if (eh == NULL) {
        eh = mtod(m, struct ether_header *);
        m_adj(m, ETHER_HDR_LEN);
    }
/*
 * Subroutine of udp_input(), mainly for source code readability.
 * Caller must properly init udp_ip6 and udp_in6 beforehand.
 */
static void
udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *udp_in)
{
    struct mbuf *opts = NULL;
    int ret;

    KASSERT(INP_ISIPV4(last), ("not inet inpcb"));

    if (last->inp_flags & INP_CONTROLOPTS ||
        last->inp_socket->so_options & SO_TIMESTAMP)
        ip_savecontrol(last, &opts, ip, n);

    m_adj(n, off);

    lwkt_gettoken(&last->inp_socket->so_rcv.ssb_token);
    ret = ssb_appendaddr(&last->inp_socket->so_rcv,
        (struct sockaddr *)udp_in, n, opts);
    lwkt_reltoken(&last->inp_socket->so_rcv.ssb_token);
    if (ret == 0) {
        m_freem(n);
        if (opts)
            m_freem(opts);
        udp_stat.udps_fullsock++;
    } else {
        sorwakeup(last->inp_socket);
    }
}
static int
ng_sscop_rcvupper(hook_p hook, item_p item)
{
    struct priv *priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
    struct sscop_arg a;
    struct mbuf *m;

    if (!priv->enabled) {
        NG_FREE_ITEM(item);
        return (EINVAL);
    }

    /*
     * If the lower layer is not connected allow to proceed.
     * The lower layer sending function will drop outgoing frames,
     * and the sscop will timeout any establish requests.
     */
    NGI_GET_M(item, m);
    NG_FREE_ITEM(item);

    if (!(m->m_flags & M_PKTHDR)) {
        printf("no pkthdr\n");
        m_freem(m);
        return (EINVAL);
    }
    if (m->m_len < (int)sizeof(a) && (m = m_pullup(m, sizeof(a))) == NULL)
        return (ENOBUFS);
    bcopy((caddr_t)mtod(m, struct sscop_arg *), &a, sizeof(a));
    m_adj(m, sizeof(a));

    return (sscop_aasig(priv->sscop, a.sig, m, a.arg));
}
static int
ng_sscfu_rcvlower(hook_p hook, item_p item)
{
    node_p node = NG_HOOK_NODE(hook);
    struct priv *priv = NG_NODE_PRIVATE(node);
    struct mbuf *m;
    struct sscop_arg a;

    if (!priv->enabled || priv->upper == NULL) {
        NG_FREE_ITEM(item);
        return (0);
    }

    NGI_GET_M(item, m);
    NG_FREE_ITEM(item);

    if (!(m->m_flags & M_PKTHDR)) {
        printf("no pkthdr\n");
        m_freem(m);
        return (EINVAL);
    }

    /*
     * Strip off the SSCOP header.
     */
    if (m->m_len < (int)sizeof(a) && (m = m_pullup(m, sizeof(a))) == NULL)
        return (ENOMEM);
    bcopy((caddr_t)mtod(m, struct sscop_arg *), &a, sizeof(a));
    m_adj(m, sizeof(a));

    sscfu_input(priv->sscf, a.sig, m, a.arg);

    return (0);
}
static int
ffec_setup_rxbuf(struct ffec_softc *sc, int idx, struct mbuf *m)
{
    int error, nsegs;
    struct bus_dma_segment seg;

    /*
     * We need to leave at least ETHER_ALIGN bytes free at the beginning of
     * the buffer to allow the data to be re-aligned after receiving it (by
     * copying it backwards ETHER_ALIGN bytes in the same buffer).  We also
     * have to ensure that the beginning of the buffer is aligned to the
     * hardware's requirements.
     */
    m_adj(m, roundup(ETHER_ALIGN, FEC_RXBUF_ALIGN));

    error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
        m, &seg, &nsegs, 0);
    if (error != 0) {
        return (error);
    }

    bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
        BUS_DMASYNC_PREREAD);

    sc->rxbuf_map[idx].mbuf = m;
    ffec_setup_rxdesc(sc, idx, seg.ds_addr);

    return (0);
}
static int
do_rx_iscsi_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
    struct adapter *sc = iq->adapter;
    struct cpl_iscsi_data *cpl = mtod(m, struct cpl_iscsi_data *);
    u_int tid = GET_TID(cpl);
    struct toepcb *toep = lookup_tid(sc, tid);
    struct icl_cxgbei_pdu *icp = toep->ulpcb2;

    M_ASSERTPKTHDR(m);

    /* Must already have received the header (but not the data). */
    MPASS(icp != NULL);
    MPASS(icp->pdu_flags == SBUF_ULP_FLAG_HDR_RCVD);
    MPASS(icp->ip.ip_data_mbuf == NULL);
    MPASS(icp->ip.ip_data_len == 0);

    m_adj(m, sizeof(*cpl));

    icp->pdu_flags |= SBUF_ULP_FLAG_DATA_RCVD;
    icp->ip.ip_data_mbuf = m;
    icp->ip.ip_data_len = m->m_pkthdr.len;

#if 0
    CTR4(KTR_CXGBE, "%s: tid %u, cpl->len dlen %u, m->m_len dlen %u",
        __func__, tid, ntohs(cpl->len), m->m_len);
#endif

    return (0);
}
struct mbuf *
arc_frag_next(struct ifnet *ifp)
{
    struct arccom *ac;
    struct mbuf *m;
    struct arc_header *ah;

    ac = (struct arccom *)ifp->if_l2com;
    if ((m = ac->curr_frag) == 0) {
        int tfrags;

        /* dequeue new packet */
        IF_DEQUEUE(&ifp->if_snd, m);
        if (m == 0)
            return 0;

        ah = mtod(m, struct arc_header *);
        if (!arc_isphds(ah->arc_type))
            return m;

        ++ac->ac_seqid;		/* make the seqid unique */
        tfrags = (m->m_pkthdr.len + ARC_MAX_DATA - 1) / ARC_MAX_DATA;
        ac->fsflag = 2 * tfrags - 3;
        ac->sflag = 0;
        ac->rsflag = ac->fsflag;
        ac->arc_dhost = ah->arc_dhost;
        ac->arc_shost = ah->arc_shost;
        ac->arc_type = ah->arc_type;

        m_adj(m, ARC_HDRNEWLEN);
        ac->curr_frag = m;
    }
Static int
url_newbuf(struct url_softc *sc, struct url_chain *c, struct mbuf *m)
{
    struct mbuf *m_new = NULL;

    DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__));

    if (m == NULL) {
        MGETHDR(m_new, M_DONTWAIT, MT_DATA);
        if (m_new == NULL) {
            printf("%s: no memory for rx list "
                "-- packet dropped!\n", USBDEVNAME(sc->sc_dev));
            return (ENOBUFS);
        }
        MCLGET(m_new, M_DONTWAIT);
        if (!(m_new->m_flags & M_EXT)) {
            printf("%s: no memory for rx list "
                "-- packet dropped!\n", USBDEVNAME(sc->sc_dev));
            m_freem(m_new);
            return (ENOBUFS);
        }
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
    } else {
        m_new = m;
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        m_new->m_data = m_new->m_ext.ext_buf;
    }

    m_adj(m_new, ETHER_ALIGN);

    c->url_mbuf = m_new;

    return (0);
}
/*
 * DON'T use free_sent_buffers to drop the queue!
 */
static void
alloc_rx_buffers(struct sbsh_softc *sc)
{
    unsigned cur_rbd = sc->regs->LRDR & 0x7f;
    struct mbuf *m;

    while (sc->tail_rq != ((sc->head_rq - 1) & (RQLEN - 1))) {
        MGETHDR(m, M_NOWAIT, MT_DATA);
        if (!m) {
            if_printf(&sc->arpcom.ac_if, "unable to get mbuf.\n");
            return;
        }

        if (SBNI16_MAX_FRAME > MHLEN) {
            MCLGET(m, M_NOWAIT);
            if (!(m->m_flags & M_EXT)) {
                m_freem(m);
                if_printf(&sc->arpcom.ac_if,
                    "unable to get mbuf cluster.\n");
                return;
            }
            m->m_pkthdr.len = m->m_len = MCLBYTES;
        }

        m_adj(m, 2);	/* align ip on longword boundaries */

        sc->rq[sc->tail_rq++] = m;
        sc->tail_rq &= (RQLEN - 1);

        sc->rbd[cur_rbd].address = vtophys(mtod(m, vm_offset_t));
        sc->rbd[cur_rbd].length = 0;
        sc->regs->LRDR = cur_rbd = (cur_rbd + 1) & 0x7f;
    }
}
void
imxenet_recv(struct imxenet_softc *sc)
{
    struct ifnet *ifp = &sc->sc_ac.ac_if;

    bus_dmamap_sync(sc->rbdma.dma_tag, sc->rbdma.dma_map, 0,
        sc->rbdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
        sc->rxdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    while (!(sc->rx_desc_base[sc->cur_rx].status & ENET_RXD_EMPTY)) {
        struct mbuf *m;

        m = imxenet_newbuf();
        if (m == NULL) {
            ifp->if_ierrors++;
            goto done;
        }

        ifp->if_ipackets++;
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = m->m_len =
            sc->rx_desc_base[sc->cur_rx].data_length;
        m_adj(m, ETHER_ALIGN);

        memcpy(mtod(m, char *), sc->rx_buffer_base[sc->cur_rx].data,
            sc->rx_desc_base[sc->cur_rx].data_length);

        sc->rx_desc_base[sc->cur_rx].status |= ENET_RXD_EMPTY;
        sc->rx_desc_base[sc->cur_rx].data_length = 0;

        bus_dmamap_sync(sc->rbdma.dma_tag, sc->rbdma.dma_map,
            ENET_MAX_PKT_SIZE * sc->cur_rx, ENET_MAX_PKT_SIZE,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
            sizeof(struct imxenet_buf_desc) * sc->cur_rx,
            sizeof(struct imxenet_buf_desc),
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        if (sc->rx_desc_base[sc->cur_rx].status & ENET_RXD_WRAP)
            sc->cur_rx = 0;
        else
            sc->cur_rx++;

        /* push the packet up */
#if NBPFILTER > 0
        if (ifp->if_bpf)
            bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
        ether_input_mbuf(ifp, m);
    }

done:
    /* rx descriptors are ready */
    HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);
}
static int
mpls_send_frame(struct mbuf *m, struct ifnet *ifp, struct rtentry *rt)
{
    union mpls_shim msh;

    if ((rt->rt_flags & RTF_GATEWAY) == 0)
        return EHOSTUNREACH;

    rt->rt_use++;

    msh.s_addr = MPLS_GETSADDR(rt);
    if (msh.shim.label == MPLS_LABEL_IMPLNULL ||
        (m->m_flags & (M_MCAST | M_BCAST))) {
        m_adj(m, sizeof(union mpls_shim));
        m->m_pkthdr.csum_flags = 0;
    }

    switch (ifp->if_type) {
    /* only these are supported for now */
    case IFT_ETHER:
    case IFT_TUNNEL:
    case IFT_LOOP:
        return (*ifp->if_output)(ifp, m, rt->rt_gateway, rt);
        break;
    default:
        return ENETUNREACH;
    }
    return 0;
}
static int
do_rx_iscsi_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
    struct adapter *sc = iq->adapter;
    struct cxgbei_data *ci = sc->iscsi_ulp_softc;
    struct cpl_iscsi_data *cpl = mtod(m, struct cpl_iscsi_data *);
    u_int tid = GET_TID(cpl);
    struct toepcb *toep = lookup_tid(sc, tid);
    struct icl_cxgbei_pdu *icp = toep->ulpcb2;

    M_ASSERTPKTHDR(m);
    MPASS(m->m_pkthdr.len == be16toh(cpl->len) + sizeof(*cpl));

    /* Must already have received the header (but not the data). */
    MPASS(icp != NULL);
    MPASS(icp->icp_flags == ICPF_RX_HDR);
    MPASS(icp->ip.ip_data_mbuf == NULL);

    m_adj(m, sizeof(*cpl));
    MPASS(icp->ip.ip_data_len == m->m_pkthdr.len);

    icp->icp_flags |= ICPF_RX_FLBUF;
    icp->ip.ip_data_mbuf = m;
    counter_u64_add(ci->fl_pdus, 1);
    counter_u64_add(ci->fl_bytes, m->m_pkthdr.len);

#if 0
    CTR3(KTR_CXGBE, "%s: tid %u, cpl->len %u", __func__, tid,
        be16toh(cpl->len));
#endif

    return (0);
}
void
acx100_proc_wep_rxbuf(struct acx_softc *sc, struct mbuf *m, int *len)
{
    int mac_hdrlen;
    struct ieee80211_frame *f;

    /*
     * Strip leading IV and KID, and trailing CRC
     */
    f = mtod(m, struct ieee80211_frame *);

    if ((f->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
        mac_hdrlen = sizeof(struct ieee80211_frame_addr4);
    else
        mac_hdrlen = sizeof(struct ieee80211_frame);

#define IEEEWEP_IVLEN	(IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN)
#define IEEEWEP_EXLEN	(IEEEWEP_IVLEN + IEEE80211_WEP_CRCLEN)

    *len = *len - IEEEWEP_EXLEN;

    /* Move MAC header toward frame body */
    ovbcopy(f, (uint8_t *)f + IEEEWEP_IVLEN, mac_hdrlen);
    m_adj(m, IEEEWEP_IVLEN);

#undef IEEEWEP_EXLEN
#undef IEEEWEP_IVLEN
}
void
at1intr(netmsg_t msg)
{
    struct mbuf *m = msg->packet.nm_packet;
    struct elaphdr *elhp, elh;

    get_mplock();

    /*
     * Phase 1 packet handling
     */
    if (m->m_len < SZ_ELAPHDR && ((m = m_pullup(m, SZ_ELAPHDR)) == 0)) {
        ddpstat.ddps_tooshort++;
        goto out;
    }

    /*
     * This seems a little dubious, but I don't know phase 1 so leave it.
     */
    elhp = mtod(m, struct elaphdr *);
    m_adj(m, SZ_ELAPHDR);
    if (elhp->el_type == ELAP_DDPEXTEND) {
        ddp_input(m, m->m_pkthdr.rcvif, NULL, 1);
    } else {
        bcopy((caddr_t)elhp, (caddr_t)&elh, SZ_ELAPHDR);
        ddp_input(m, m->m_pkthdr.rcvif, &elh, 1);
    }
out:
    rel_mplock();
    /* msg was embedded in the mbuf, do not reply! */
}
/*
 * Decode an incoming frame coming from the switch
 */
static int
ngfrm_decode(node_p node, item_p item)
{
    const sc_p sc = NG_NODE_PRIVATE(node);
    char *data;
    int alen;
    u_int dlci = 0;
    int error = 0;
    int ctxnum;
    struct mbuf *m;

    NGI_GET_M(item, m);
    if (m->m_len < 4 && (m = m_pullup(m, 4)) == NULL) {
        error = ENOBUFS;
        goto out;
    }
    data = mtod(m, char *);
    if ((alen = sc->addrlen) == 0) {
        sc->addrlen = alen = ngfrm_addrlen(data);
    }
    switch (alen) {
    case 2:
        SHIFTIN(makeup + 0, data[0], dlci);
        SHIFTIN(makeup + 1, data[1], dlci);
        break;
    case 3:
        SHIFTIN(makeup + 0, data[0], dlci);
        SHIFTIN(makeup + 1, data[1], dlci);
        SHIFTIN(makeup + 3, data[2], dlci);	/* 3 and 2 is correct */
        break;
    case 4:
        SHIFTIN(makeup + 0, data[0], dlci);
        SHIFTIN(makeup + 1, data[1], dlci);
        SHIFTIN(makeup + 2, data[2], dlci);
        SHIFTIN(makeup + 3, data[3], dlci);
        break;
    default:
        error = EINVAL;
        goto out;
    }

    if (dlci > 1023) {
        error = EINVAL;
        goto out;
    }
    ctxnum = sc->ALT[dlci];
    if ((ctxnum & CTX_VALID) && sc->channel[ctxnum &= CTX_VALUE].hook) {
        /* Send it */
        m_adj(m, alen);
        NG_FWD_NEW_DATA(error, item, sc->channel[ctxnum].hook, m);
        return (error);
    } else {
        error = ENETDOWN;
    }
out:
    NG_FREE_ITEM(item);
    NG_FREE_M(m);
    return (error);
}
static struct ieee80211_node *
rtwn_rx_frame(struct rtwn_softc *sc, struct mbuf *m, int8_t *rssi)
{
    struct r92c_rx_stat stat;

    /* Imitate PCIe layout. */
    m_copydata(m, 0, sizeof(struct r92c_rx_stat), (caddr_t)&stat);
    m_adj(m, sizeof(struct r92c_rx_stat));

    return (rtwn_rx_common(sc, m, &stat, rssi));
}
static int
udp6_append(struct inpcb *inp, struct mbuf *n, int off,
    struct sockaddr_in6 *fromsa)
{
    struct socket *so;
    struct mbuf *opts;
    struct udpcb *up;

    INP_LOCK_ASSERT(inp);

    /*
     * Engage the tunneling protocol.
     */
    up = intoudpcb(inp);
    if (up->u_tun_func != NULL) {
        in_pcbref(inp);
        INP_RUNLOCK(inp);
        (*up->u_tun_func)(n, off, inp, (struct sockaddr *)fromsa,
            up->u_tun_ctx);
        INP_RLOCK(inp);
        return (in_pcbrele_rlocked(inp));
    }
#ifdef IPSEC
    /* Check AH/ESP integrity. */
    if (ipsec6_in_reject(n, inp)) {
        m_freem(n);
        return (0);
    }
#endif /* IPSEC */
#ifdef MAC
    if (mac_inpcb_check_deliver(inp, n) != 0) {
        m_freem(n);
        return (0);
    }
#endif
    opts = NULL;
    if (inp->inp_flags & INP_CONTROLOPTS ||
        inp->inp_socket->so_options & SO_TIMESTAMP)
        ip6_savecontrol(inp, n, &opts);
    m_adj(n, off + sizeof(struct udphdr));

    so = inp->inp_socket;
    SOCKBUF_LOCK(&so->so_rcv);
    if (sbappendaddr_locked(&so->so_rcv, (struct sockaddr *)fromsa, n,
        opts) == 0) {
        SOCKBUF_UNLOCK(&so->so_rcv);
        m_freem(n);
        if (opts)
            m_freem(opts);
        UDPSTAT_INC(udps_fullsock);
    } else
        sorwakeup_locked(so);
    return (0);
}
struct mbuf *
uether_newbuf(void)
{
    struct mbuf *m_new;

    m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
    if (m_new == NULL)
        return (NULL);
    m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

    m_adj(m_new, ETHER_ALIGN);
    return (m_new);
}
static int
mpls_unlabel_inet(struct mbuf *m)
{
    int s, iphlen;
    struct ip *iph;
    union mpls_shim *ms;
    struct ifqueue *inq;

    if (mpls_mapttl_inet || mpls_mapprec_inet) {
        /* get shim info */
        ms = mtod(m, union mpls_shim *);
        ms->s_addr = ntohl(ms->s_addr);

        /* and get rid of it */
        m_adj(m, sizeof(union mpls_shim));

        /* get ip header */
        if (m->m_len < sizeof(struct ip) &&
            (m = m_pullup(m, sizeof(struct ip))) == NULL)
            return ENOBUFS;
        iph = mtod(m, struct ip *);
        iphlen = iph->ip_hl << 2;

        /* get it all */
        if (m->m_len < iphlen) {
            if ((m = m_pullup(m, iphlen)) == NULL)
                return ENOBUFS;
            iph = mtod(m, struct ip *);
        }

        /* check ipsum */
        if (in_cksum(m, iphlen) != 0) {
            m_freem(m);
            return EINVAL;
        }

        /* set IP ttl from MPLS ttl */
        if (mpls_mapttl_inet)
            iph->ip_ttl = ms->shim.ttl;

        /* set IP Precedence from MPLS Exp */
        if (mpls_mapprec_inet) {
            iph->ip_tos = (iph->ip_tos << 3) >> 3;
            iph->ip_tos |= ms->shim.exp << 5;
        }

        /* reset ipsum because we modified TTL and TOS */
        iph->ip_sum = 0;
        iph->ip_sum = in_cksum(m, iphlen);
    } else
static inline struct mbuf *
sdp_sock_queue_rcv_mb(struct socket *sk, struct mbuf *mb)
{
    struct sdp_sock *ssk = sdp_sk(sk);
    struct sdp_bsdh *h;

    h = mtod(mb, struct sdp_bsdh *);

#ifdef SDP_ZCOPY
    SDP_SKB_CB(mb)->seq = rcv_nxt(ssk);
    if (h->mid == SDP_MID_SRCAVAIL) {
        struct sdp_srcah *srcah = (struct sdp_srcah *)(h + 1);
        struct rx_srcavail_state *rx_sa;

        ssk->srcavail_cancel_mseq = 0;

        ssk->rx_sa = rx_sa = RX_SRCAVAIL_STATE(mb) = kzalloc(
            sizeof(struct rx_srcavail_state), M_NOWAIT);
        rx_sa->mseq = ntohl(h->mseq);
        rx_sa->used = 0;
        rx_sa->len = mb_len = ntohl(srcah->len);
        rx_sa->rkey = ntohl(srcah->rkey);
        rx_sa->vaddr = be64_to_cpu(srcah->vaddr);
        rx_sa->flags = 0;

        if (ssk->tx_sa) {
            sdp_dbg_data(ssk->socket, "got RX SrcAvail while waiting "
                "for TX SrcAvail. waking up TX SrcAvail"
                "to be aborted\n");
            wake_up(sk->sk_sleep);
        }

        atomic_add(mb->len, &ssk->rcv_nxt);
        sdp_dbg_data(sk, "queueing SrcAvail. mb_len = %d vaddr = %lld\n",
            mb_len, rx_sa->vaddr);
    } else
#endif
    {
        atomic_add(mb->m_pkthdr.len, &ssk->rcv_nxt);
    }

    m_adj(mb, SDP_HEAD_SIZE);
    SOCKBUF_LOCK(&sk->so_rcv);
    if (unlikely(h->flags & SDP_OOB_PRES))
        sdp_urg(ssk, mb);
    sbappend_locked(&sk->so_rcv, mb);
    sorwakeup_locked(sk);
    return mb;
}
/* called only from irq */
static struct mbuf *
sdp_process_rx_wc(struct sdp_sock *ssk, struct ib_wc *wc)
{
    struct mbuf *mb;
    struct sdp_bsdh *h;
    struct socket *sk = ssk->socket;
    int mseq;

    mb = sdp_recv_completion(ssk, wc->wr_id);
    if (unlikely(!mb))
        return NULL;

    if (unlikely(wc->status)) {
        if (ssk->qp_active && sk) {
            sdp_dbg(sk, "Recv completion with error. "
                "Status %d, vendor: %d\n",
                wc->status, wc->vendor_err);
            sdp_abort(sk);
            ssk->qp_active = 0;
        }
        m_freem(mb);
        return NULL;
    }

    sdp_dbg_data(sk, "Recv completion. ID %d Length %d\n",
        (int)wc->wr_id, wc->byte_len);
    if (unlikely(wc->byte_len < sizeof(struct sdp_bsdh))) {
        sdp_warn(sk, "SDP BUG! byte_len %d < %zd\n",
            wc->byte_len, sizeof(struct sdp_bsdh));
        m_freem(mb);
        return NULL;
    }

    /* Use m_adj to trim the tail of data we didn't use. */
    m_adj(mb, -(mb->m_pkthdr.len - wc->byte_len));
    h = mtod(mb, struct sdp_bsdh *);

    SDP_DUMP_PACKET(ssk->socket, "RX", mb, h);

    ssk->rx_packets++;
    ssk->rx_bytes += mb->m_pkthdr.len;

    mseq = ntohl(h->mseq);
    atomic_set(&ssk->mseq_ack, mseq);
    if (mseq != (int)wc->wr_id)
        sdp_warn(sk, "SDP BUG! mseq %d != wrid %d\n",
            mseq, (int)wc->wr_id);

    return mb;
}
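/*
 * Illustration only, not part of the original sources: a minimal sketch of
 * the two m_adj() idioms seen throughout these examples.  A positive length
 * trims bytes from the head of the chain (e.g. stripping a protocol header),
 * while a negative length trims from the tail (e.g. dropping a trailer, or
 * the unused bytes beyond wc->byte_len above).  The function and parameter
 * names below are hypothetical.
 */
#include <sys/param.h>
#include <sys/mbuf.h>

static void
example_m_adj_usage(struct mbuf *m, int hdrlen, int trailerlen)
{
    /* Strip hdrlen bytes of header from the front of the chain. */
    m_adj(m, hdrlen);

    /* Trim trailerlen bytes of trailer from the end of the chain. */
    m_adj(m, -trailerlen);
}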
static int
faithoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct rtentry *rt)
{
    pktqueue_t *pktq;
    size_t pktlen;
    int s, error;
    uint32_t af;

    if ((m->m_flags & M_PKTHDR) == 0)
        panic("faithoutput no HDR");

    af = dst->sa_family;

    /* BPF write needs to be handled specially */
    if (af == AF_UNSPEC) {
        af = *(mtod(m, int *));
        m_adj(m, sizeof(int));
    }
static uint32_t
get_uint32(struct mbuf **mp)
{
    struct mbuf *m = *mp;
    uint32_t n;

    if (m->m_len < sizeof(uint32_t)) {
        m = m_pullup(m, sizeof(uint32_t));
        if (!m) {
            *mp = NULL;
            return (0);
        }
    }
    bcopy(mtod(m, uint32_t *), &n, sizeof(uint32_t));
    m_adj(m, sizeof(uint32_t));
    *mp = m;
    return (ntohl(n));
}
/*
 * New mbuf
 */
static int
lgue_newbuf(struct lgue_softc *sc, int len, struct mbuf **m_buf)
{
    struct ifnet *ifp;

    ifp = &sc->lgue_arpcom.ac_if;
    *m_buf = NULL;

    /* Allocate mbuf */
    *m_buf = m_getcl(MB_DONTWAIT, MT_DATA, MT_HEADER);
    if (*m_buf == NULL) {
        if_printf(ifp, " no memory for rx buffer --- packet dropped!\n");
        return (ENOBUFS);
    }
    (*m_buf)->m_len = (*m_buf)->m_pkthdr.len = MCLBYTES;
    m_adj(*m_buf, ETHER_ALIGN);
    return (0);
}
Static void
usbintr()
{
    struct ether_header *eh;
    struct mbuf *m;
    struct usb_qdat *q;
    struct ifnet *ifp;
    int s;

    s = splimp();

    /* Check the RX queue */
    while (1) {
        IF_DEQUEUE(&usbq_rx, m);
        if (m == NULL)
            break;
        eh = mtod(m, struct ether_header *);
        q = (struct usb_qdat *)m->m_pkthdr.rcvif;
        ifp = q->ifp;
        m->m_pkthdr.rcvif = ifp;
        m_adj(m, sizeof(struct ether_header));
        ether_input(ifp, eh, m);

        /* Re-arm the receiver */
        (*q->if_rxstart)(ifp);
        if (!IFQ_IS_EMPTY(&ifp->if_snd))
            (*ifp->if_start)(ifp);
    }

    /* Check the TX queue */
    while (1) {
        IF_DEQUEUE(&usbq_tx, m);
        if (m == NULL)
            break;
        ifp = m->m_pkthdr.rcvif;
        m_freem(m);
        if (!IFQ_IS_EMPTY(&ifp->if_snd))
            (*ifp->if_start)(ifp);
    }

    splx(s);

    return;
}
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
kr_newbuf(struct kr_softc *sc, int idx)
{
    struct kr_desc *desc;
    struct kr_rxdesc *rxd;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);
    m->m_len = m->m_pkthdr.len = MCLBYTES;
    m_adj(m, sizeof(uint64_t));

    if (bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_rx_tag,
        sc->kr_cdata.kr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    rxd = &sc->kr_cdata.kr_rxdesc[idx];
    if (rxd->rx_m != NULL) {
        bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap);
    }
    map = rxd->rx_dmamap;
    rxd->rx_dmamap = sc->kr_cdata.kr_rx_sparemap;
    sc->kr_cdata.kr_rx_sparemap = map;
    bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
        BUS_DMASYNC_PREREAD);
    rxd->rx_m = m;
    desc = rxd->desc;
    desc->kr_ca = segs[0].ds_addr;
    desc->kr_ctl |= KR_DMASIZE(segs[0].ds_len);
    rxd->saved_ca = desc->kr_ca;
    rxd->saved_ctl = desc->kr_ctl;

    return (0);
}