/*
 * Tap a frame into BPF as if it still carried its 802.1Q encapsulation.
 *
 * The mbuf arrives with a plain Ethernet header at the front; we
 * temporarily strip that header, synthesize an ether_vlan_header
 * carrying 'vlantag', hand header + payload to bpf_ptap(), and then
 * restore the mbuf to its original layout.
 *
 * bp      - BPF attachment point to deliver the packet to.
 * m       - frame with a contiguous Ethernet header at the front
 *           (asserted below).
 * vlantag - VLAN tag (host byte order) to present to listeners.
 */
void
vlan_ether_ptap(struct bpf_if *bp, struct mbuf *m, uint16_t vlantag)
{
	const struct ether_header *src_hdr;
	struct ether_vlan_header vlan_hdr;

	KASSERT(m->m_len >= ETHER_HDR_LEN, ("ether header is not contiguous!"));

	src_hdr = mtod(m, const struct ether_header *);

	/*
	 * Slide the mbuf data pointer past the Ethernet header; the header
	 * bytes themselves stay in place, so src_hdr remains readable.
	 */
	m_adj(m, ETHER_HDR_LEN);

	/* Rebuild the header with the 802.1Q fields spliced in. */
	bcopy(src_hdr, &vlan_hdr, 2 * ETHER_ADDR_LEN);
	vlan_hdr.evl_encap_proto = htons(ETHERTYPE_VLAN);
	vlan_hdr.evl_tag = htons(vlantag);
	vlan_hdr.evl_proto = src_hdr->ether_type;

	bpf_ptap(bp, m, &vlan_hdr, ETHER_HDR_LEN + EVL_ENCAPLEN);

	/* XXX assumes data was left intact */
	M_PREPEND(m, ETHER_HDR_LEN, MB_WAIT);
}
/*
 * gre_mobile_input: decapsulate a Minimal-Encapsulation (IPPROTO_MOBILE,
 * RFC 2004) packet in place and requeue the inner IP datagram to the
 * IP netisr.
 *
 * mp    - pointer to the mbuf holding the outer IP header followed by
 *         the mobility header (struct mobip_h overlays both).
 * offp  - offset to the mobility header; copied into hlen below but
 *         NOTE(review): hlen is never used afterwards — confirm intent.
 * proto - protocol number from the dispatcher (unused here; we look
 *         the tunnel up by IPPROTO_MOBILE explicitly).
 *
 * Always returns IPPROTO_DONE; the mbuf is either consumed by
 * netisr_queue() or freed on error.
 */
int
gre_mobile_input(struct mbuf **mp, int *offp, int proto)
{
	static const uint32_t af = AF_INET;
	struct mbuf *m = *mp;
	/* ip and mip alias the same mbuf data: outer IP header + mob. hdr. */
	struct ip *ip = mtod(m, struct ip *);
	struct mobip_h *mip = mtod(m, struct mobip_h *);
	struct gre_softc *sc;
	/* NOTE(review): osrc is set but never read below — dead store. */
	u_char osrc = 0;
	int msiz, hlen;

	hlen = *offp;

	if ((sc = gre_lookup(m, IPPROTO_MOBILE)) == NULL) {
		/* No matching tunnel or tunnel is down. */
		m_freem(m);
		return(IPPROTO_DONE);
	}

	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;

	/*
	 * S bit set: the mobility header also carries the original source
	 * address (long form); otherwise only the destination (short form).
	 * Restore the saved addresses into the outer IP header in place.
	 */
	if(ntohs(mip->mh.proto) & MOB_H_SBIT) {
		osrc = 1;
		msiz = MOB_H_SIZ_L;
		mip->mi.ip_src.s_addr = mip->mh.osrc;
	} else {
		msiz = MOB_H_SIZ_S;
	}

	mip->mi.ip_dst.s_addr = mip->mh.odst;
	/* Upper byte of the mobility 'proto' field is the inner protocol. */
	mip->mi.ip_p = (ntohs(mip->mh.proto) >> 8);

	/* Verify the mobility header checksum before trusting its fields. */
	if (gre_in_cksum((u_short*)&mip->mh,msiz) != 0) {
		m_freem(m);
		return(IPPROTO_DONE);
	}

	/*
	 * Remove the mobility header by sliding the payload forward over it
	 * (regions overlap; bcopy handles overlapping copies here).
	 */
	bcopy((caddr_t)(ip) + (ip->ip_hl << 2) + msiz,
	      (caddr_t)(ip) + (ip->ip_hl << 2),
	      m->m_len - msiz - (ip->ip_hl << 2));

	m->m_len -= msiz;
	m->m_pkthdr.len -= msiz;

	/*
	 * On FreeBSD, rip_input() supplies us with ip->ip_len
	 * already converted into host byteorder and also decreases
	 * it by the length of IP header, however, ip_input() expects
	 * that this field is in the original format (network byteorder
	 * and full size of IP packet), so that adjust accordingly.
	 */
	ip->ip_len = htons(ip->ip_len + sizeof(struct ip) - msiz);

	/* Header was rewritten in place; recompute the IP checksum. */
	ip->ip_sum = 0;
	ip->ip_sum = in_cksum(m, (ip->ip_hl << 2));

	if (sc->sc_if.if_bpf)
		bpf_ptap(sc->sc_if.if_bpf, m, &af, sizeof(af));

	/* Make the decapsulated packet appear to arrive on the gre if. */
	m->m_pkthdr.rcvif = &sc->sc_if;

	netisr_queue(NETISR_IP, m);
	return(IPPROTO_DONE);
}
/*
 * gre_input2: common GRE (RFC 1701) decapsulation.
 *
 * Parses the GRE header (which is variable length depending on the
 * C/R/K/S flag bits), strips outer IP + GRE headers from the mbuf and
 * queues the inner packet onto the netisr matching its ethertype.
 *
 * m     - mbuf with the outer IP header at the front (gip overlays it).
 * hlen  - length of the outer IP header already accounted for.
 * proto - outer IP protocol; only IPPROTO_GRE is handled here.
 *
 * Returns 1 when the packet was consumed (queued to a netisr), 0 when
 * it was not handled.  NOTE(review): on the 0-return paths the mbuf is
 * NOT freed here — presumably the caller disposes of it; confirm.
 */
static int
gre_input2(struct mbuf *m ,int hlen, u_char proto)
{
	static const uint32_t af = AF_INET;
	struct greip *gip = mtod(m, struct greip *);
	int isr;
	struct gre_softc *sc;
	u_short flags;

	if ((sc = gre_lookup(m, proto)) == NULL) {
		/* No matching tunnel or tunnel is down. */
		return (0);
	}

	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;

	switch (proto) {
	case IPPROTO_GRE:
		hlen += sizeof (struct gre_h);

		/* process GRE flags as packet can be of variable len */
		flags = ntohs(gip->gi_flags);

		/*
		 * Checksum & Offset are present.
		 * (Bitwise '|' acts as logical-or on these flag tests;
		 * the checksum+offset pair occupies one 4-byte word if
		 * either bit is set.)
		 */
		if ((flags & GRE_CP) | (flags & GRE_RP))
			hlen += 4;

		/* We don't support routing fields (variable length) */
		if (flags & GRE_RP)
			return(0);

		/* Key and Sequence fields each add a fixed 4 bytes. */
		if (flags & GRE_KP)
			hlen += 4;
		if (flags & GRE_SP)
			hlen +=4;

		switch (ntohs(gip->gi_ptype)) { /* ethertypes */
		case ETHERTYPE_IP:	/* fallthrough: both go to IP */
		case WCCP_PROTOCOL_TYPE:
			isr = NETISR_IP;
			break;
#ifdef NETATALK
		case ETHERTYPE_ATALK:
			isr = NETISR_ATALK1;
			break;
#endif
		case ETHERTYPE_IPV6:
			/* FALLTHROUGH */
		default:	/* others not yet supported */
			return(0);
		}
		break;
	default:
		/* others not yet supported */
		return(0);
	}

	/* Strip outer IP + GRE headers; expose the inner packet. */
	m->m_data += hlen;
	m->m_len -= hlen;
	m->m_pkthdr.len -= hlen;

	if (sc->sc_if.if_bpf)
		bpf_ptap(sc->sc_if.if_bpf, m, &af, sizeof(af));

	/* Make the decapsulated packet appear to arrive on the gre if. */
	m->m_pkthdr.rcvif = &sc->sc_if;

	netisr_queue(isr, m);
	return(1);	/* packet is done, no further processing needed */
}
/* * icoutput() */ static int icoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct rtentry *rt) { device_t icdev = devclass_get_device(ic_devclass, ifp->if_dunit); device_t parent = device_get_parent(icdev); struct ic_softc *sc = (struct ic_softc *)device_get_softc(icdev); int len, sent; struct mbuf *mm; u_char *cp; uint32_t hdr = dst->sa_family; ifp->if_flags |= IFF_RUNNING; crit_enter(); /* already sending? */ if (sc->ic_sending) { ifp->if_oerrors ++; goto error; } /* insert header */ bcopy ((char *)&hdr, sc->ic_obuf, ICHDRLEN); cp = sc->ic_obuf + ICHDRLEN; len = 0; mm = m; do { if (len + mm->m_len > sc->ic_if.if_mtu) { /* packet to large */ ifp->if_oerrors ++; goto error; } bcopy(mtod(mm,char *), cp, mm->m_len); cp += mm->m_len; len += mm->m_len; } while ((mm = mm->m_next)); if (ifp->if_bpf) bpf_ptap(ifp->if_bpf, m, &hdr, ICHDRLEN); sc->ic_sending = 1; m_freem(m); crit_exit(); /* send the packet */ if (iicbus_block_write(parent, sc->ic_addr, sc->ic_obuf, len + ICHDRLEN, &sent)) ifp->if_oerrors ++; else { ifp->if_opackets ++; ifp->if_obytes += len; } sc->ic_sending = 0; return (0); error: m_freem(m); crit_exit(); return(0); }
/* * The output routine. Takes a packet and encapsulates it in the protocol * given by sc->g_proto. See also RFC 1701 and RFC 2004 */ static int gre_output_serialized(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct rtentry *rt) { int error = 0; struct gre_softc *sc = ifp->if_softc; struct greip *gh; struct ip *ip; u_short etype = 0; struct mobile_h mob_h; struct route *ro; struct sockaddr_in *ro_dst; ASSERT_NETISR_NCPUS(mycpuid); /* * gre may cause infinite recursion calls when misconfigured. * We'll prevent this by introducing upper limit. */ if (++(sc->called) > max_gre_nesting) { kprintf("%s: gre_output: recursively called too many " "times(%d)\n", if_name(&sc->sc_if), sc->called); m_freem(m); error = EIO; /* is there better errno? */ goto end; } if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 0 || sc->g_src.s_addr == INADDR_ANY || sc->g_dst.s_addr == INADDR_ANY) { m_freem(m); error = ENETDOWN; goto end; } ro = &sc->route_pcpu[mycpuid]; ro_dst = (struct sockaddr_in *)&ro->ro_dst; if (ro->ro_rt != NULL && ((ro->ro_rt->rt_flags & RTF_UP) == 0 || ro_dst->sin_addr.s_addr != sc->g_dst.s_addr)) { RTFREE(ro->ro_rt); ro->ro_rt = NULL; } if (ro->ro_rt == NULL) { error = gre_compute_route(sc, ro); if (error) { m_freem(m); goto end; } } gh = NULL; ip = NULL; if (ifp->if_bpf) { bpf_gettoken(); if (ifp->if_bpf) { uint32_t af = dst->sa_family; bpf_ptap(ifp->if_bpf, m, &af, sizeof(af)); } bpf_reltoken(); } m->m_flags &= ~(M_BCAST|M_MCAST); if (sc->g_proto == IPPROTO_MOBILE) { if (dst->sa_family == AF_INET) { struct mbuf *m0; int msiz; ip = mtod(m, struct ip *); /* * RFC2004 specifies that fragmented datagrams shouldn't * be encapsulated. */ if (ip->ip_off & (IP_MF | IP_OFFMASK)) { m_freem(m); error = EINVAL; /* is there better errno? 
*/ goto end; } memset(&mob_h, 0, MOB_H_SIZ_L); mob_h.proto = (ip->ip_p) << 8; mob_h.odst = ip->ip_dst.s_addr; ip->ip_dst.s_addr = sc->g_dst.s_addr; /* * If the packet comes from our host, we only change * the destination address in the IP header. * Else we also need to save and change the source */ if (in_hosteq(ip->ip_src, sc->g_src)) { msiz = MOB_H_SIZ_S; } else { mob_h.proto |= MOB_H_SBIT; mob_h.osrc = ip->ip_src.s_addr; ip->ip_src.s_addr = sc->g_src.s_addr; msiz = MOB_H_SIZ_L; } mob_h.proto = htons(mob_h.proto); mob_h.hcrc = gre_in_cksum((u_short *)&mob_h, msiz); if ((m->m_data - msiz) < m->m_pktdat) { /* need new mbuf */ MGETHDR(m0, M_NOWAIT, MT_HEADER); if (m0 == NULL) { m_freem(m); error = ENOBUFS; goto end; } m0->m_next = m; m->m_data += sizeof(struct ip); m->m_len -= sizeof(struct ip); m0->m_pkthdr.len = m->m_pkthdr.len + msiz; m0->m_len = msiz + sizeof(struct ip); m0->m_data += max_linkhdr; memcpy(mtod(m0, caddr_t), (caddr_t)ip, sizeof(struct ip)); m = m0; } else { /* we have some space left in the old one */ m->m_data -= msiz; m->m_len += msiz; m->m_pkthdr.len += msiz; bcopy(ip, mtod(m, caddr_t), sizeof(struct ip)); } ip = mtod(m, struct ip *); memcpy((caddr_t)(ip + 1), &mob_h, (unsigned)msiz); ip->ip_len = ntohs(ip->ip_len) + msiz; } else { /* AF_INET */ m_freem(m); error = EINVAL; goto end; } } else if (sc->g_proto == IPPROTO_GRE) {