void mec_rxintr(struct mec_softc *sc, uint32_t stat) { bus_space_tag_t st = sc->sc_st; bus_space_handle_t sh = sc->sc_sh; struct ifnet *ifp = &sc->sc_ac.ac_if; struct mbuf *m; struct mec_rxdesc *rxd; uint64_t rxstat; u_int len; int i, last; DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: called\n")); bus_space_write_8(st, sh, MEC_RX_ALIAS, 0); last = (stat & MEC_INT_RX_MCL_FIFO_ALIAS) >> 8; /* XXX does alias count mod 32 even if 16 descs are set up? */ last &= MEC_NRXDESC_MASK; if (stat & MEC_INT_RX_FIFO_UNDERFLOW) last = (last - 1) & MEC_NRXDESC_MASK; DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxptr %d last %d\n", sc->sc_rxptr, last)); for (i = sc->sc_rxptr; i != last; i = MEC_NEXTRX(i)) { MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD); rxd = &sc->sc_rxdesc[i]; rxstat = rxd->rxd_stat; DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxstat = 0x%llx, rxptr = %d\n", rxstat, i)); DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxfifo = 0x%x\n", (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO))); if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) { /* Status not received but fifo counted? Drop it! */ goto dropit; } len = rxstat & MEC_RXSTAT_LEN; if (len < ETHER_MIN_LEN || len > ETHER_MAX_LEN) { /* invalid length packet; drop it. */ DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: wrong packet\n")); dropit: ifp->if_ierrors++; rxd->rxd_stat = 0; MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD); bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i)); continue; } if (rxstat & (MEC_RXSTAT_BADPACKET | MEC_RXSTAT_LONGEVENT | MEC_RXSTAT_INVALID | MEC_RXSTAT_CRCERROR | MEC_RXSTAT_VIOLATION)) { printf("%s: mec_rxintr: status = 0x%llx\n", sc->sc_dev.dv_xname, rxstat); goto dropit; } /* * now allocate an mbuf (and possibly a cluster) to hold * the received packet. */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { printf("%s: unable to allocate RX mbuf\n", sc->sc_dev.dv_xname); goto dropit; } if (len > (MHLEN - ETHER_ALIGN)) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { printf("%s: unable to allocate RX cluster\n", sc->sc_dev.dv_xname); m_freem(m); m = NULL; goto dropit; } } /* * Note MEC chip seems to insert 2 byte padding at the start of * RX buffer, but we copy whole buffer to avoid unaligned copy. */ MEC_RXBUFSYNC(sc, i, len + ETHER_ALIGN, BUS_DMASYNC_POSTREAD); memcpy(mtod(m, caddr_t), rxd->rxd_buf, ETHER_ALIGN + len - ETHER_CRC_LEN); MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD); m->m_data += ETHER_ALIGN; /* put RX buffer into FIFO again */ rxd->rxd_stat = 0; MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD); bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i)); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN; ifp->if_ipackets++; #if NBPFILTER > 0 /* * Pass this up to any BPF listeners, but only * pass it up the stack if it's for us. */ if (ifp->if_bpf) bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); #endif /* Pass it on. */ ether_input_mbuf(ifp, m); } /* update RX pointer */ sc->sc_rxptr = i; bus_space_write_8(st, sh, MEC_RX_ALIAS, (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) | MEC_DMA_RX_INT_ENABLE); }
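/*
 * Illustrative sketch, not part of the driver above: the power-of-two
 * ring arithmetic that mec_rxintr() relies on via MEC_NEXTRX() and
 * MEC_NRXDESC_MASK.  The names RING_SIZE, ring_next and ring_count are
 * hypothetical stand-ins; the real code walks sc_rxptr up to the
 * "last" index extracted from the interrupt status word.
 */
#define RING_SIZE	16u			/* assumed power of two */
#define RING_MASK	(RING_SIZE - 1u)

static inline unsigned
ring_next(unsigned i)
{
	return (i + 1u) & RING_MASK;		/* wrap without a modulo */
}

/* Descriptors outstanding between the software pointer and "last". */
static inline unsigned
ring_count(unsigned rxptr, unsigned last)
{
	return (last - rxptr) & RING_MASK;
}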
/* * Input a Neighbor Solicitation Message. * * Based on RFC 2461 * Based on RFC 2462 (duplicate address detection) */ void nd6_ns_input(struct mbuf *m, int off, int icmp6len) { struct ifnet *ifp = m->m_pkthdr.rcvif; struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); struct nd_neighbor_solicit *nd_ns; struct in6_addr saddr6 = ip6->ip6_src; struct in6_addr daddr6 = ip6->ip6_dst; struct in6_addr taddr6; struct in6_addr myaddr6; char *lladdr = NULL; struct ifaddr *ifa = NULL; int lladdrlen = 0; int anycast = 0, proxy = 0, tentative = 0; int tlladdr; int rflag; union nd_opts ndopts; struct sockaddr_dl proxydl; char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN]; rflag = (V_ip6_forwarding) ? ND_NA_FLAG_ROUTER : 0; if (ND_IFINFO(ifp)->flags & ND6_IFF_ACCEPT_RTADV && V_ip6_norbit_raif) rflag = 0; #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, off, icmp6len,); nd_ns = (struct nd_neighbor_solicit *)((caddr_t)ip6 + off); #else IP6_EXTHDR_GET(nd_ns, struct nd_neighbor_solicit *, m, off, icmp6len); if (nd_ns == NULL) { ICMP6STAT_INC(icp6s_tooshort); return; } #endif ip6 = mtod(m, struct ip6_hdr *); /* adjust pointer for safety */ taddr6 = nd_ns->nd_ns_target; if (in6_setscope(&taddr6, ifp, NULL) != 0) goto bad; if (ip6->ip6_hlim != 255) { nd6log((LOG_ERR, "nd6_ns_input: invalid hlim (%d) from %s to %s on %s\n", ip6->ip6_hlim, ip6_sprintf(ip6bufs, &ip6->ip6_src), ip6_sprintf(ip6bufd, &ip6->ip6_dst), if_name(ifp))); goto bad; } if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) { /* dst has to be a solicited node multicast address. */ if (daddr6.s6_addr16[0] == IPV6_ADDR_INT16_MLL && /* don't check ifindex portion */ daddr6.s6_addr32[1] == 0 && daddr6.s6_addr32[2] == IPV6_ADDR_INT32_ONE && daddr6.s6_addr8[12] == 0xff) { ; /* good */ } else { nd6log((LOG_INFO, "nd6_ns_input: bad DAD packet " "(wrong ip6 dst)\n")); goto bad; } } else if (!V_nd6_onlink_ns_rfc4861) { struct sockaddr_in6 src_sa6; /* * According to recent IETF discussions, it is not a good idea * to accept a NS from an address which would not be deemed * to be a neighbor otherwise. This point is expected to be * clarified in future revisions of the specification. */ bzero(&src_sa6, sizeof(src_sa6)); src_sa6.sin6_family = AF_INET6; src_sa6.sin6_len = sizeof(src_sa6); src_sa6.sin6_addr = saddr6; if (nd6_is_addr_neighbor(&src_sa6, ifp) == 0) { nd6log((LOG_INFO, "nd6_ns_input: " "NS packet from non-neighbor\n")); goto bad; } } if (IN6_IS_ADDR_MULTICAST(&taddr6)) { nd6log((LOG_INFO, "nd6_ns_input: bad NS target (multicast)\n")); goto bad; } icmp6len -= sizeof(*nd_ns); nd6_option_init(nd_ns + 1, icmp6len, &ndopts); if (nd6_options(&ndopts) < 0) { nd6log((LOG_INFO, "nd6_ns_input: invalid ND option, ignored\n")); /* nd6_options have incremented stats */ goto freeit; } if (ndopts.nd_opts_src_lladdr) { lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1); lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3; } if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src) && lladdr) { nd6log((LOG_INFO, "nd6_ns_input: bad DAD packet " "(link-layer address option)\n")); goto bad; } /* * Attaching target link-layer address to the NA? * (RFC 2461 7.2.4) * * NS IP dst is unicast/anycast MUST NOT add * NS IP dst is solicited-node multicast MUST add * * In implementation, we add target link-layer address by default. * We do not add one in MUST NOT cases. 
*/ if (!IN6_IS_ADDR_MULTICAST(&daddr6)) tlladdr = 0; else tlladdr = 1; /* * Target address (taddr6) must be either: * (1) Valid unicast/anycast address for my receiving interface, * (2) Unicast address for which I'm offering proxy service, or * (3) "tentative" address on which DAD is being performed. */ /* (1) and (3) check. */ if (ifp->if_carp) ifa = (*carp_iamatch6_p)(ifp, &taddr6); if (ifa == NULL) ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &taddr6); /* (2) check. */ if (ifa == NULL) { struct rtentry *rt; struct sockaddr_in6 tsin6; int need_proxy; #ifdef RADIX_MPATH struct route_in6 ro; #endif bzero(&tsin6, sizeof tsin6); tsin6.sin6_len = sizeof(struct sockaddr_in6); tsin6.sin6_family = AF_INET6; tsin6.sin6_addr = taddr6; #ifdef RADIX_MPATH bzero(&ro, sizeof(ro)); ro.ro_dst = tsin6; rtalloc_mpath((struct route *)&ro, RTF_ANNOUNCE); rt = ro.ro_rt; #else rt = rtalloc1((struct sockaddr *)&tsin6, 0, 0); #endif need_proxy = (rt && (rt->rt_flags & RTF_ANNOUNCE) != 0 && rt->rt_gateway->sa_family == AF_LINK); if (rt != NULL) { /* * Make a copy while we can be sure that rt_gateway * is still stable before unlocking to avoid lock * order problems. proxydl will only be used if * proxy will be set in the next block. */ if (need_proxy) proxydl = *SDL(rt->rt_gateway); RTFREE_LOCKED(rt); } if (need_proxy) { /* * proxy NDP for single entry */ ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); if (ifa) proxy = 1; } } if (ifa == NULL) { /* * We've got an NS packet, and we don't have that adddress * assigned for us. We MUST silently ignore it. * See RFC2461 7.2.3. */ goto freeit; } myaddr6 = *IFA_IN6(ifa); anycast = ((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST; tentative = ((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_TENTATIVE; if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DUPLICATED) goto freeit; if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) { nd6log((LOG_INFO, "nd6_ns_input: lladdrlen mismatch for %s " "(if %d, NS packet %d)\n", ip6_sprintf(ip6bufs, &taddr6), ifp->if_addrlen, lladdrlen - 2)); goto bad; } if (IN6_ARE_ADDR_EQUAL(&myaddr6, &saddr6)) { nd6log((LOG_INFO, "nd6_ns_input: duplicate IP6 address %s\n", ip6_sprintf(ip6bufs, &saddr6))); goto freeit; } /* * We have neighbor solicitation packet, with target address equals to * one of my tentative address. * * src addr how to process? * --- --- * multicast of course, invalid (rejected in ip6_input) * unicast somebody is doing address resolution -> ignore * unspec dup address detection * * The processing is defined in RFC 2462. */ if (tentative) { /* * If source address is unspecified address, it is for * duplicate address detection. * * If not, the packet is for addess resolution; * silently ignore it. */ if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) nd6_dad_ns_input(ifa); goto freeit; } /* * If the source address is unspecified address, entries must not * be created or updated. * It looks that sender is performing DAD. Output NA toward * all-node multicast address, to tell the sender that I'm using * the address. * S bit ("solicited") must be zero. */ if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) { struct in6_addr in6_all; in6_all = in6addr_linklocal_allnodes; if (in6_setscope(&in6_all, ifp, NULL) != 0) goto bad; nd6_na_output(ifp, &in6_all, &taddr6, ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) | rflag, tlladdr, proxy ? 
(struct sockaddr *)&proxydl : NULL); goto freeit; } nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen, ND_NEIGHBOR_SOLICIT, 0); nd6_na_output(ifp, &saddr6, &taddr6, ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) | rflag | ND_NA_FLAG_SOLICITED, tlladdr, proxy ? (struct sockaddr *)&proxydl : NULL); freeit: if (ifa != NULL) ifa_free(ifa); m_freem(m); return; bad: nd6log((LOG_ERR, "nd6_ns_input: src=%s\n", ip6_sprintf(ip6bufs, &saddr6))); nd6log((LOG_ERR, "nd6_ns_input: dst=%s\n", ip6_sprintf(ip6bufs, &daddr6))); nd6log((LOG_ERR, "nd6_ns_input: tgt=%s\n", ip6_sprintf(ip6bufs, &taddr6))); ICMP6STAT_INC(icp6s_badns); if (ifa != NULL) ifa_free(ifa); m_freem(m); }
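/*
 * Illustrative sketch, not taken from the function above: the
 * solicited-node multicast test nd6_ns_input() applies to DAD probes
 * with an unspecified source.  On the wire such an address is
 * ff02::1:ffXX:XXXX -- bytes 0-1 are ff02, bytes 2-10 are zero,
 * byte 11 is 0x01, byte 12 is 0xff, and the last three bytes echo the
 * low 24 bits of the target.  (The kernel check above deliberately
 * skips the second 16-bit word, where an embedded scope zone may
 * live.)  is_solicited_node_mcast() is a hypothetical helper operating
 * on the raw 16-byte address.
 */
#include <stdint.h>
#include <string.h>

static int
is_solicited_node_mcast(const uint8_t a[16])
{
	static const uint8_t zero9[9];		/* bytes 2..10 must be zero */

	return (a[0] == 0xff && a[1] == 0x02 &&
	    memcmp(&a[2], zero9, sizeof(zero9)) == 0 &&
	    a[11] == 0x01 && a[12] == 0xff);
}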
/* * Massage IPv4/IPv6 headers for AH processing. */ static int ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out) { struct mbuf *m = *m0; unsigned char *ptr; int off, count; #ifdef INET struct ip *ip; #endif /* INET */ #ifdef INET6 struct ip6_ext *ip6e; struct ip6_hdr ip6; int alloc, len, ad; #endif /* INET6 */ switch (proto) { #ifdef INET case AF_INET: /* * This is the least painful way of dealing with IPv4 header * and option processing -- just make sure they're in * contiguous memory. */ *m0 = m = m_pullup(m, skip); if (m == NULL) { DPRINTF(("%s: m_pullup failed\n", __func__)); return ENOBUFS; } /* Fix the IP header */ ip = mtod(m, struct ip *); if (V_ah_cleartos) ip->ip_tos = 0; ip->ip_ttl = 0; ip->ip_sum = 0; /* * On input, fix ip_len which has been byte-swapped * at ip_input(). */ if (!out) { ip->ip_len = htons(ip->ip_len + skip); if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK) ip->ip_off = htons(ip->ip_off & IP_DF); else ip->ip_off = 0; } else { if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK) ip->ip_off = htons(ntohs(ip->ip_off) & IP_DF); else ip->ip_off = 0; } ptr = mtod(m, unsigned char *); /* IPv4 option processing */ for (off = sizeof(struct ip); off < skip;) { if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP || off + 1 < skip) ; else { DPRINTF(("%s: illegal IPv4 option length for " "option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } switch (ptr[off]) { case IPOPT_EOL: off = skip; /* End the loop. */ break; case IPOPT_NOP: off++; break; case IPOPT_SECURITY: /* 0x82 */ case 0x85: /* Extended security. */ case 0x86: /* Commercial security. */ case 0x94: /* Router alert */ case 0x95: /* RFC1770 */ /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } off += ptr[off + 1]; break; case IPOPT_LSRR: case IPOPT_SSRR: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* * On output, if we have either of the * source routing options, we should * swap the destination address of the * IP header with the last address * specified in the option, as that is * what the destination's IP header * will look like. */ if (out) bcopy(ptr + off + ptr[off + 1] - sizeof(struct in_addr), &(ip->ip_dst), sizeof(struct in_addr)); /* Fall through */ default: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* Zeroize all other options. */ count = ptr[off + 1]; bcopy(ipseczeroes, ptr + off, count); off += count; break; } /* Sanity check. */ if (off > skip) { DPRINTF(("%s: malformed IPv4 options header\n", __func__)); m_freem(m); return EINVAL; } } break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Ugly... */ /* Copy and "cook" the IPv6 header. */ m_copydata(m, 0, sizeof(ip6), (caddr_t) &ip6); /* We don't do IPv6 Jumbograms. */ if (ip6.ip6_plen == 0) { DPRINTF(("%s: unsupported IPv6 jumbogram\n", __func__)); m_freem(m); return EMSGSIZE; } ip6.ip6_flow = 0; ip6.ip6_hlim = 0; ip6.ip6_vfc &= ~IPV6_VERSION_MASK; ip6.ip6_vfc |= IPV6_VERSION; /* Scoped address handling. */ if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_src)) ip6.ip6_src.s6_addr16[1] = 0; if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_dst)) ip6.ip6_dst.s6_addr16[1] = 0; /* Done with IPv6 header. 
*/ m_copyback(m, 0, sizeof(struct ip6_hdr), (caddr_t) &ip6); /* Let's deal with the remaining headers (if any). */ if (skip - sizeof(struct ip6_hdr) > 0) { if (m->m_len <= skip) { ptr = (unsigned char *) malloc( skip - sizeof(struct ip6_hdr), M_XDATA, M_NOWAIT); if (ptr == NULL) { DPRINTF(("%s: failed to allocate memory" "for IPv6 headers\n",__func__)); m_freem(m); return ENOBUFS; } /* * Copy all the protocol headers after * the IPv6 header. */ m_copydata(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); alloc = 1; } else { /* No need to allocate memory. */ ptr = mtod(m, unsigned char *) + sizeof(struct ip6_hdr); alloc = 0; } } else break; off = ip6.ip6_nxt & 0xff; /* Next header type. */ for (len = 0; len < skip - sizeof(struct ip6_hdr);) switch (off) { case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: ip6e = (struct ip6_ext *) (ptr + len); /* * Process the mutable/immutable * options -- borrows heavily from the * KAME code. */ for (count = len + sizeof(struct ip6_ext); count < len + ((ip6e->ip6e_len + 1) << 3);) { if (ptr[count] == IP6OPT_PAD1) { count++; continue; /* Skip padding. */ } /* Sanity check. */ if (count > len + ((ip6e->ip6e_len + 1) << 3)) { m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } ad = ptr[count + 1]; /* If mutable option, zeroize. */ if (ptr[count] & IP6OPT_MUTABLE) bcopy(ipseczeroes, ptr + count, ptr[count + 1]); count += ad; /* Sanity check. */ if (count > skip - sizeof(struct ip6_hdr)) { m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } } /* Advance. */ len += ((ip6e->ip6e_len + 1) << 3); off = ip6e->ip6e_nxt; break; case IPPROTO_ROUTING: /* * Always include routing headers in * computation. */ ip6e = (struct ip6_ext *) (ptr + len); len += ((ip6e->ip6e_len + 1) << 3); off = ip6e->ip6e_nxt; break; default: DPRINTF(("%s: unexpected IPv6 header type %d", __func__, off)); if (alloc) free(ptr, M_XDATA); m_freem(m); return EINVAL; } /* Copyback and free, if we allocated. */ if (alloc) { m_copyback(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); free(ptr, M_XDATA); } break; #endif /* INET6 */ }
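/*
 * Illustrative sketch of the two rules the IPv6 loop above depends on,
 * using hypothetical names on a plain byte buffer.  An extension
 * header advertises its length in 8-byte units excluding the first
 * 8 bytes, so it spans (ip6e_len + 1) * 8 bytes.  Inside HBH/DST
 * option headers each option is a TLV, and options whose type has the
 * 0x20 "mutable" bit set are zeroed before the AH ICV is computed
 * (this sketch zeroes the option data field, per RFC 4302).
 */
#include <stddef.h>
#include <string.h>

#define EXT_HDR_BYTES(ip6e_len)	(((size_t)(ip6e_len) + 1) << 3)
#define OPT_PAD1		0x00	/* single byte of padding, no length */
#define OPT_MUTABLE		0x20	/* option data may change in flight */

static void
zero_mutable_opts(unsigned char *opts, size_t len)
{
	size_t off = 0;

	while (off < len) {
		if (opts[off] == OPT_PAD1) {
			off++;
			continue;
		}
		if (off + 2 > len || off + 2 + opts[off + 1] > len)
			break;				/* malformed; stop */
		if (opts[off] & OPT_MUTABLE)
			memset(&opts[off + 2], 0, opts[off + 1]);
		off += 2 + opts[off + 1];
	}
}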
/* * ESP input processing, called (eventually) through the protocol switch. */ static int esp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) { struct auth_hash *esph; struct enc_xform *espx; struct tdb_ident *tdbi; struct tdb_crypto *tc; int plen, alen, hlen; struct m_tag *mtag; struct newesp *esp; struct cryptodesc *crde; struct cryptop *crp; IPSEC_ASSERT(sav != NULL, ("null SA")); IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("null encoding xform")); /* Valid IP Packet length ? */ if ( (skip&3) || (m->m_pkthdr.len&3) ){ DPRINTF(("%s: misaligned packet, skip %u pkt len %u", __func__, skip, m->m_pkthdr.len)); V_espstat.esps_badilen++; m_freem(m); return EINVAL; } /* XXX don't pullup, just copy header */ IP6_EXTHDR_GET(esp, struct newesp *, m, skip, sizeof (struct newesp)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Determine the ESP header length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; /* Authenticator hash size */ if (esph != NULL) { switch (esph->type) { case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: alen = esph->hashsize/2; break; default: alen = AH_HMAC_HASHLEN; break; } }else alen = 0; /* * Verify payload length is multiple of encryption algorithm * block size. * * NB: This works for the null algorithm because the blocksize * is 4 and all packets must be 4-byte aligned regardless * of the algorithm. */ plen = m->m_pkthdr.len - (skip + hlen + alen); if ((plen & (espx->blocksize - 1)) || (plen <= 0)) { DPRINTF(("%s: payload of %d octets not a multiple of %d octets," " SA %s/%08lx\n", __func__, plen, espx->blocksize, ipsec_address(&sav->sah->saidx.dst), (u_long) ntohl(sav->spi))); V_espstat.esps_badilen++; m_freem(m); return EINVAL; } /* * Check sequence number. */ if (esph && sav->replay && !ipsec_chkreplay(ntohl(esp->esp_seq), sav)) { DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_logsastr(sav))); /*XXX*/ V_espstat.esps_replay++; m_freem(m); return ENOBUFS; /*XXX*/ } /* Update the counters */ V_espstat.esps_ibytes += m->m_pkthdr.len - (skip + hlen + alen); /* Find out if we've already done crypto */ for (mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, NULL); mtag != NULL; mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, mtag)) { tdbi = (struct tdb_ident *) (mtag + 1); if (tdbi->proto == sav->sah->saidx.proto && tdbi->spi == sav->spi && !bcmp(&tdbi->dst, &sav->sah->saidx.dst, sizeof(union sockaddr_union))) break; } /* Get crypto descriptors */ crp = crypto_getreq(esph && espx ? 
2 : 1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); V_espstat.esps_crypto++; m_freem(m); return ENOBUFS; } /* Get IPsec-specific opaque pointer */ if (esph == NULL || mtag != NULL) tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto), M_XDATA, M_NOWAIT|M_ZERO); else tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto) + alen, M_XDATA, M_NOWAIT|M_ZERO); if (tc == NULL) { crypto_freereq(crp); DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__)); V_espstat.esps_crypto++; m_freem(m); return ENOBUFS; } tc->tc_ptr = (caddr_t) mtag; if (esph) { struct cryptodesc *crda = crp->crp_desc; IPSEC_ASSERT(crda != NULL, ("null ah crypto descriptor")); /* Authentication descriptor */ crda->crd_skip = skip; crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; crda->crd_alg = esph->type; crda->crd_key = sav->key_auth->key_data; crda->crd_klen = _KEYBITS(sav->key_auth); /* Copy the authenticator */ if (mtag == NULL) m_copydata(m, m->m_pkthdr.len - alen, alen, (caddr_t) (tc + 1)); /* Chain authentication request */ crde = crda->crd_next; } else { crde = crp->crp_desc; } /* Crypto operation descriptor */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (caddr_t) m; crp->crp_callback = esp_input_cb; crp->crp_sid = sav->tdb_cryptoid; crp->crp_opaque = (caddr_t) tc; /* These are passed as-is to the callback */ tc->tc_spi = sav->spi; tc->tc_dst = sav->sah->saidx.dst; tc->tc_proto = sav->sah->saidx.proto; tc->tc_protoff = protoff; tc->tc_skip = skip; KEY_ADDREFSA(sav); tc->tc_sav = sav; /* Decryption descriptor */ if (espx) { IPSEC_ASSERT(crde != NULL, ("null esp crypto descriptor")); crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_inject = skip + hlen - sav->ivlen; crde->crd_alg = espx->type; crde->crd_key = sav->key_enc->key_data; crde->crd_klen = _KEYBITS(sav->key_enc); /* XXX Rounds ? */ } if (mtag == NULL) return crypto_dispatch(crp); else return esp_input_cb(crp); }
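/*
 * Illustrative sketch, hypothetical helper name: the length sanity
 * check esp_input() performs.  The encrypted payload is whatever lies
 * between the ESP header plus IV and the trailing authenticator, and
 * it must be a positive multiple of the cipher block size.  With a
 * power-of-two block size the remainder reduces to a mask; this is
 * also why the null cipher (block size 4) is covered by the 4-byte
 * alignment check done earlier on the whole packet.
 */
static int
esp_payload_len_ok(int pktlen, int skip, int hlen, int alen, int blocksize)
{
	int plen = pktlen - (skip + hlen + alen);	/* payload octets */

	if (plen <= 0)
		return 0;
	return (plen & (blocksize - 1)) == 0;	/* blocksize assumed 2^n */
}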
/* * Send a command to the firmware. We try to implement the Linux * driver interface for the routine. * mostly from if_iwn (iwn_cmd()). * * For now, we always copy the first part and map the second one (if it exists). */ int iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd) { struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE]; struct iwm_tfd *desc; struct iwm_tx_data *txdata = NULL; struct iwm_device_cmd *cmd; struct mbuf *m; bus_dma_segment_t seg; bus_addr_t paddr; uint32_t addr_lo; int error = 0, i, paylen, off; int code; int async, wantresp; int group_id; int nsegs; size_t hdrlen, datasz; uint8_t *data; code = hcmd->id; async = hcmd->flags & IWM_CMD_ASYNC; wantresp = hcmd->flags & IWM_CMD_WANT_SKB; data = NULL; for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) { paylen += hcmd->len[i]; } /* if the command wants an answer, busy sc_cmd_resp */ if (wantresp) { KASSERT(!async, ("invalid async parameter")); while (sc->sc_wantresp != -1) msleep(&sc->sc_wantresp, &sc->sc_mtx, 0, "iwmcmdsl", 0); sc->sc_wantresp = ring->qid << 16 | ring->cur; IWM_DPRINTF(sc, IWM_DEBUG_CMD, "wantresp is %x\n", sc->sc_wantresp); } /* * Is the hardware still available? (after e.g. above wait). */ if (sc->sc_flags & IWM_FLAG_STOPPED) { error = ENXIO; goto out; } desc = &ring->desc[ring->cur]; txdata = &ring->data[ring->cur]; group_id = iwm_cmd_groupid(code); if (group_id != 0) { hdrlen = sizeof(cmd->hdr_wide); datasz = sizeof(cmd->data_wide); } else { hdrlen = sizeof(cmd->hdr); datasz = sizeof(cmd->data); } if (paylen > datasz) { size_t totlen; IWM_DPRINTF(sc, IWM_DEBUG_CMD, "large command paylen=%u len0=%u\n", paylen, hcmd->len[0]); /* Command is too large */ totlen = hdrlen + paylen; if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) { device_printf(sc->sc_dev, "firmware command too long (%zd bytes)\n", totlen); error = EINVAL; goto out; } m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE); if (m == NULL) { error = ENOBUFS; goto out; } m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; error = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m, &seg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: can't map mbuf, error %d\n", __func__, error); m_freem(m); goto out; } txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */ cmd = mtod(m, struct iwm_device_cmd *); paddr = seg.ds_addr; } else {
static void smc_task_rx(void *context, int pending) { u_int packet, status, len; uint8_t *data; struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *mhead, *mtail; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; mhead = mtail = NULL; SMC_LOCK(sc); packet = smc_read_1(sc, FIFO_RX); while ((packet & FIFO_EMPTY) == 0) { /* * Grab an mbuf and attach a cluster. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { break; } MCLGET(m, M_NOWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); break; } /* * Point to the start of the packet. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); /* * Grab status and packet length. */ status = smc_read_2(sc, DATA0); len = smc_read_2(sc, DATA0) & RX_LEN_MASK; len -= 6; if (status & RX_ODDFRM) len += 1; /* * Check for errors. */ if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) { smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); ifp->if_ierrors++; m_freem(m); break; } /* * Set the mbuf up the way we want it. */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len + 2; /* XXX: Is this right? */ m_adj(m, ETHER_ALIGN); /* * Pull the packet out of the device. Make sure we're in the * right bank first as things may have changed while we were * allocating our mbuf. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); data = mtod(m, uint8_t *); smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1); if (len & 1) { data += len & ~1; *data = smc_read_1(sc, DATA0); } /* * Tell the device we're done. */ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if (m == NULL) { break; } if (mhead == NULL) { mhead = mtail = m; m->m_next = NULL; } else { mtail->m_next = m; mtail = m; } packet = smc_read_1(sc, FIFO_RX); } sc->smc_mask |= RCV_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); while (mhead != NULL) { m = mhead; mhead = mhead->m_next; m->m_next = NULL; ifp->if_ipackets++; (*ifp->if_input)(ifp, m); } }
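/*
 * Illustrative sketch, not driver code: how smc_task_rx() drains a
 * frame from the chip's data register 16 bits at a time and then
 * fetches one trailing byte when the length is odd.  The rd16/rd8
 * callbacks are hypothetical stand-ins for the bus-space accessors.
 */
#include <stdint.h>

static void
copy_frame(uint8_t *dst, unsigned len,
    uint16_t (*rd16)(void *), uint8_t (*rd8)(void *), void *dev)
{
	uint16_t *w = (uint16_t *)dst;
	unsigned i;

	for (i = 0; i < len >> 1; i++)		/* whole 16-bit words */
		w[i] = rd16(dev);
	if (len & 1)				/* odd trailing byte */
		dst[len & ~1u] = rd8(dev);
}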
int bootpc_call( struct bootp_packet *call, struct bootp_packet *reply, /* output */ struct proc *procp) { struct socket *so; struct sockaddr_in *sin; struct mbuf *m, *nam; struct uio auio; struct iovec aio; int error, rcvflg, timo, secs, len; /* Free at end if not null. */ nam = NULL; /* * Create socket and set its recieve timeout. */ if ((error = socreate(AF_INET, &so, SOCK_DGRAM, 0,procp))) goto out; m = m_get(M_WAIT, MT_SOOPTS); if (m == NULL) { error = ENOBUFS; goto out; } else { struct timeval *tv; tv = mtod(m, struct timeval *); m->m_len = sizeof(*tv); tv->tv_sec = 1; tv->tv_usec = 0; if ((error = sosetopt(so, SOL_SOCKET, SO_RCVTIMEO, m))) goto out; } /* * Enable broadcast. */ { int *on; m = m_get(M_WAIT, MT_SOOPTS); if (m == NULL) { error = ENOBUFS; goto out; } on = mtod(m, int *); m->m_len = sizeof(*on); *on = 1; if ((error = sosetopt(so, SOL_SOCKET, SO_BROADCAST, m))) goto out; } /* * Bind the local endpoint to a bootp client port. */ m = m_getclr(M_WAIT, MT_SONAME); sin = mtod(m, struct sockaddr_in *); sin->sin_len = m->m_len = sizeof(*sin); sin->sin_family = AF_INET; sin->sin_addr.s_addr = INADDR_ANY; sin->sin_port = htons(IPPORT_BOOTPC); error = sobind(so, m); m_freem(m); if (error) { printf("bind failed\n"); goto out; } /* * Setup socket address for the server. */ nam = m_get(M_WAIT, MT_SONAME); if (nam == NULL) { error = ENOBUFS; goto out; } sin = mtod(nam, struct sockaddr_in *); sin-> sin_len = sizeof(*sin); sin-> sin_family = AF_INET; sin->sin_addr.s_addr = INADDR_BROADCAST; sin->sin_port = htons(IPPORT_BOOTPS); nam->m_len = sizeof(*sin); /* * Send it, repeatedly, until a reply is received, * but delay each re-send by an increasing amount. * If the delay hits the maximum, start complaining. */ for (timo=1; timo <= MAX_RESEND_DELAY; timo++) { /* Send BOOTP request (or re-send). */ aio.iov_base = (caddr_t) call; aio.iov_len = sizeof(*call); auio.uio_iov = &aio; auio.uio_iovcnt = 1; auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = UIO_WRITE; auio.uio_offset = 0; auio.uio_resid = sizeof(*call); auio.uio_procp = procp; error = sosend(so, nam, &auio, NULL, NULL, 0); if (error) { printf("bootpc_call: sosend: %d\n", error); switch (error) { case ENOBUFS: /* No buffer space available */ case ENETUNREACH: /* Network is unreachable */ case ENETDOWN: /* Network interface is not configured */ case EHOSTDOWN: /* Host is down */ case EHOSTUNREACH: /* Host is unreachable */ case EMSGSIZE: /* Message too long */ /* This is a possibly transient error. We can still receive replies from previous attempts. */ break; default: goto out; } } /* * Wait for up to timo seconds for a reply. * The socket receive timeout was set to 1 second. */ secs = timo; while (secs > 0) { aio.iov_base = (caddr_t) reply; aio.iov_len = sizeof(*reply); auio.uio_iov = &aio; auio.uio_iovcnt = 1; auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = UIO_READ; auio.uio_offset = 0; auio.uio_resid = sizeof(*reply); auio.uio_procp = procp; rcvflg = 0; error = soreceive(so, NULL, &auio, NULL, NULL, &rcvflg); if (error == EWOULDBLOCK) { secs--; call->secs=htons(ntohs(call->secs)+1); continue; } if (error) goto out; len = sizeof(*reply) - auio.uio_resid; /* Do we have the required number of bytes ? */ if (len < BOOTP_MIN_LEN) continue; /* Is it the right reply? 
*/ if (reply->op != 2) continue; if (reply->xid != call->xid) continue; if (reply->hlen != call->hlen) continue; if (bcmp(reply->chaddr,call->chaddr,call->hlen)) continue; goto gotreply; /* break two levels */ } /* while secs */ } /* send/receive a number of times then return an error */ { uint32_t addr = ntohl(sin->sin_addr.s_addr); printf("BOOTP timeout for server %"PRIu32".%"PRIu32".%"PRIu32".%"PRIu32"\n", (addr >> 24) & 0xff, (addr >> 16) & 0xff, (addr >> 8) & 0xff, addr & 0xff); } error = ETIMEDOUT; goto out; gotreply: out: if (nam) m_freem(nam); soclose(so); return error; }
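/*
 * Illustrative sketch: the reply filtering bootpc_call() applies after
 * soreceive().  The struct below is a hypothetical minimal subset of
 * the real bootp_packet, kept only for illustration; the checks mirror
 * the ones above -- op must be BOOTREPLY (2), and xid, hlen and the
 * client hardware address must match the outstanding request.
 */
#include <stdint.h>
#include <string.h>

struct bootp_hdr_min {			/* hypothetical subset */
	uint8_t		op;		/* 1 = request, 2 = reply */
	uint8_t		htype;
	uint8_t		hlen;
	uint8_t		hops;
	uint32_t	xid;
	uint8_t		chaddr[16];
};

static int
bootp_reply_matches(const struct bootp_hdr_min *call,
    const struct bootp_hdr_min *reply)
{
	return (reply->op == 2 &&
	    reply->xid == call->xid &&
	    reply->hlen == call->hlen &&
	    memcmp(reply->chaddr, call->chaddr, call->hlen) == 0);
}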
void igmp_input(struct mbuf *m, int iphlen) { register struct igmp *igmp; register struct ip *ip; register int igmplen; register struct ifnet *ifp = m->m_pkthdr.rcvif; register int minlen; register struct in_multi *inm; register struct in_ifaddr *ia; struct in_multistep step; struct router_info *rti; int timer; /** timer value in the igmp query header **/ ++igmpstat.igps_rcv_total; ip = mtod(m, struct ip *); igmplen = ip->ip_len; /* * Validate lengths */ if (igmplen < IGMP_MINLEN) { ++igmpstat.igps_rcv_tooshort; m_freem(m); return; } minlen = iphlen + IGMP_MINLEN; if ((m->m_flags & M_EXT || m->m_len < minlen) && (m = m_pullup(m, minlen)) == 0) { ++igmpstat.igps_rcv_tooshort; return; } /* * Validate checksum */ m->m_data += iphlen; m->m_len -= iphlen; igmp = mtod(m, struct igmp *); if (in_cksum(m, igmplen)) { ++igmpstat.igps_rcv_badsum; m_freem(m); return; } m->m_data -= iphlen; m->m_len += iphlen; ip = mtod(m, struct ip *); timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE; rti = find_rti(ifp); /* * In the IGMPv2 specification, there are 3 states and a flag. * * In Non-Member state, we simply don't have a membership record. * In Delaying Member state, our timer is running (inm->inm_timer) * In Idle Member state, our timer is not running (inm->inm_timer==0) * * The flag is inm->inm_state, it is set to IGMP_OTHERMEMBER if * we have heard a report from another member, or IGMP_IREPORTEDLAST * if I sent the last report. */ switch (igmp->igmp_type) { case IGMP_MEMBERSHIP_QUERY: ++igmpstat.igps_rcv_queries; if (ifp->if_flags & IFF_LOOPBACK) break; if (igmp->igmp_code == 0) { /* * Old router. Remember that the querier on this * interface is old, and set the timer to the * value in RFC 1112. */ rti->rti_type = IGMP_V1_ROUTER; rti->rti_time = 0; timer = IGMP_MAX_HOST_REPORT_DELAY * PR_FASTHZ; if (ip->ip_dst.s_addr != igmp_all_hosts_group || igmp->igmp_group.s_addr != 0) { ++igmpstat.igps_rcv_badqueries; m_freem(m); return; } } else { /* * New router. Simply do the new validity check. */ if (igmp->igmp_group.s_addr != 0 && !IN_MULTICAST(ntohl(igmp->igmp_group.s_addr))) { ++igmpstat.igps_rcv_badqueries; m_freem(m); return; } } /* * - Start the timers in all of our membership records * that the query applies to for the interface on * which the query arrived excl. those that belong * to the "all-hosts" group (224.0.0.1). * - Restart any timer that is already running but has * a value longer than the requested timeout. * - Use the value specified in the query message as * the maximum timeout. */ IN_FIRST_MULTI(step, inm); while (inm != NULL) { if (inm->inm_ifp == ifp && inm->inm_addr.s_addr != igmp_all_hosts_group && (igmp->igmp_group.s_addr == 0 || igmp->igmp_group.s_addr == inm->inm_addr.s_addr)) { if (inm->inm_timer == 0 || inm->inm_timer > timer) { inm->inm_timer = IGMP_RANDOM_DELAY(timer); igmp_timers_are_running = 1; } } IN_NEXT_MULTI(step, inm); } break; case IGMP_V1_MEMBERSHIP_REPORT: case IGMP_V2_MEMBERSHIP_REPORT: /* * For fast leave to work, we have to know that we are the * last person to send a report for this group. Reports * can potentially get looped back if we are a multicast * router, so discard reports sourced by me. 
*/ IFP_TO_IA(ifp, ia); if (ia && ip->ip_src.s_addr == IA_SIN(ia)->sin_addr.s_addr) break; ++igmpstat.igps_rcv_reports; if (ifp->if_flags & IFF_LOOPBACK) break; if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr))) { ++igmpstat.igps_rcv_badreports; m_freem(m); return; } /* * KLUDGE: if the IP source address of the report has an * unspecified (i.e., zero) subnet number, as is allowed for * a booting host, replace it with the correct subnet number * so that a process-level multicast routing demon can * determine which subnet it arrived from. This is necessary * to compensate for the lack of any way for a process to * determine the arrival interface of an incoming packet. */ if ((ntohl(ip->ip_src.s_addr) & IN_CLASSA_NET) == 0) if (ia) ip->ip_src.s_addr = htonl(ia->ia_subnet); /* * If we belong to the group being reported, stop * our timer for that group. */ IN_LOOKUP_MULTI(igmp->igmp_group, ifp, inm); if (inm != NULL) { inm->inm_timer = 0; ++igmpstat.igps_rcv_ourreports; inm->inm_state = IGMP_OTHERMEMBER; } break; } /* * Pass all valid IGMP packets up to any process(es) listening * on a raw IGMP socket. */ rip_input(m, iphlen); }
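/*
 * Illustrative sketch, hypothetical helper: how the query path above
 * turns a max-response code into a report timer.  An IGMPv2 query
 * expresses the maximum response time in tenths of a second; the host
 * converts it to fast-timeout ticks and picks a uniformly distributed
 * delay in [1, max] so that members of a group do not all report at
 * once.  rand() is used only for illustration; the kernel's
 * IGMP_RANDOM_DELAY() uses its own PRNG.  FASTHZ and TIMER_SCALE are
 * assumed values standing in for PR_FASTHZ and IGMP_TIMER_SCALE.
 */
#include <stdlib.h>

#define FASTHZ		5	/* assumed fast-timeout ticks per second */
#define TIMER_SCALE	10	/* IGMP code is in tenths of a second */

static int
igmp_report_delay(int max_resp_code)
{
	int max_ticks = max_resp_code * FASTHZ / TIMER_SCALE;

	if (max_ticks < 1)
		max_ticks = 1;
	return 1 + rand() % max_ticks;	/* delay in [1, max_ticks] */
}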
static int udp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *td) { struct inpcb *inp; struct inpcbinfo *pcbinfo; int error = 0; pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol); inp = sotoinpcb(so); KASSERT(inp != NULL, ("udp6_send: inp == NULL")); INP_WLOCK(inp); if (addr) { if (addr->sa_len != sizeof(struct sockaddr_in6)) { error = EINVAL; goto bad; } if (addr->sa_family != AF_INET6) { error = EAFNOSUPPORT; goto bad; } } #ifdef INET if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) { int hasv4addr; struct sockaddr_in6 *sin6 = 0; if (addr == 0) hasv4addr = (inp->inp_vflag & INP_IPV4); else { sin6 = (struct sockaddr_in6 *)addr; hasv4addr = IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr) ? 1 : 0; } if (hasv4addr) { struct pr_usrreqs *pru; /* * XXXRW: We release UDP-layer locks before calling * udp_send() in order to avoid recursion. However, * this does mean there is a short window where inp's * fields are unstable. Could this lead to a * potential race in which the factors causing us to * select the UDPv4 output routine are invalidated? */ INP_WUNLOCK(inp); if (sin6) in6_sin6_2_sin_in_sock(addr); pru = inetsw[ip_protox[IPPROTO_UDP]].pr_usrreqs; /* addr will just be freed in sendit(). */ return ((*pru->pru_send)(so, flags, m, addr, control, td)); } } #endif #ifdef MAC mac_inpcb_create_mbuf(inp, m); #endif INP_HASH_WLOCK(pcbinfo); error = udp6_output(inp, m, addr, control, td); INP_HASH_WUNLOCK(pcbinfo); #ifdef INET #endif INP_WUNLOCK(inp); return (error); bad: INP_WUNLOCK(inp); m_freem(m); return (error); }
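/*
 * Illustrative sketch, not kernel code: the shape of the
 * IN6_IS_ADDR_V4MAPPED() test that udp6_send() uses to decide whether
 * to hand the packet to the IPv4 UDP output path.  A v4-mapped address
 * is ::ffff:a.b.c.d -- ten zero bytes, two 0xff bytes, then the IPv4
 * address in the last four bytes.
 */
#include <stdint.h>
#include <string.h>

static int
is_v4mapped(const uint8_t a[16])
{
	static const uint8_t prefix[12] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
	};

	return memcmp(a, prefix, sizeof(prefix)) == 0;
}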
/* * Interface exists: make available by filling in network interface * record. System will initialize the interface when it is ready * to accept packets. */ void qeattach(struct device *parent, struct device *self, void *aux) { struct uba_attach_args *ua = aux; struct uba_softc *ubasc = (struct uba_softc *)parent; struct qe_softc *sc = (struct qe_softc *)self; struct ifnet *ifp = (struct ifnet *)&sc->sc_if; struct qe_ring *rp; int i, error; sc->sc_iot = ua->ua_iot; sc->sc_ioh = ua->ua_ioh; sc->sc_dmat = ua->ua_dmat; /* * Allocate DMA safe memory for descriptors and setup memory. */ sc->sc_ui.ui_size = sizeof(struct qe_cdata); if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) { printf(": unable to ubmemalloc(), error = %d\n", error); return; } sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr; sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr; /* * Zero the newly allocated memory. */ bzero(sc->sc_qedata, sizeof(struct qe_cdata)); /* * Create the transmit descriptor DMA maps. We take advantage * of the fact that the Qbus address space is big, and therefore * allocate map registers for all transmit descriptors also, * so that we can avoid this each time we send a packet. */ for (i = 0; i < TXDESCS; i++) { if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &sc->sc_xmtmap[i]))) { printf(": unable to create tx DMA map %d, error = %d\n", i, error); goto fail_4; } } /* * Create receive buffer DMA maps. */ for (i = 0; i < RXDESCS; i++) { if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]))) { printf(": unable to create rx DMA map %d, error = %d\n", i, error); goto fail_5; } } /* * Pre-allocate the receive buffers. */ for (i = 0; i < RXDESCS; i++) { if ((error = qe_add_rxbuf(sc, i)) != 0) { printf(": unable to allocate or map rx buffer %d," " error = %d\n", i, error); goto fail_6; } } /* * Create ring loops of the buffer chains. * This is only done once. */ rp = sc->sc_qedata->qc_recv; rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]); rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) | QE_VALID | QE_CHAIN; rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET; rp = sc->sc_qedata->qc_xmit; rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]); rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) | QE_VALID | QE_CHAIN; rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET; /* * Get the vector that was set at match time, and remember it. */ sc->sc_intvec = ubasc->uh_lastiv; QE_WCSR(QE_CSR_CSR, QE_RESET); DELAY(1000); QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET); /* * Read out ethernet address and tell which type this card is. */ for (i = 0; i < 6; i++) sc->sc_ac.ac_enaddr[i] = QE_RCSR(i * 2) & 0xff; QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1); printf(": %s, address %s\n", QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna", ether_sprintf(sc->sc_ac.ac_enaddr)); QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */ uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr, sc, &sc->sc_intrcnt); sc->sc_cvec = ua->ua_cvec; evcount_attach(&sc->sc_intrcnt, sc->sc_dev.dv_xname, &sc->sc_cvec); strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = qestart; ifp->if_ioctl = qeioctl; ifp->if_watchdog = qetimeout; IFQ_SET_READY(&ifp->if_snd); /* * Attach the interface. 
*/ if_attach(ifp); ether_ifattach(ifp); return; /* * Free any resources we've allocated during the failed attach * attempt. Do this in reverse order and fall through. */ fail_6: for (i = 0; i < RXDESCS; i++) { if (sc->sc_rxmbuf[i] != NULL) { bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]); m_freem(sc->sc_rxmbuf[i]); } } fail_5: for (i = 0; i < RXDESCS; i++) { if (sc->sc_rcvmap[i] != NULL) bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]); } fail_4: for (i = 0; i < TXDESCS; i++) { if (sc->sc_xmtmap[i] != NULL) bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]); } }
static void qeintr(void *arg) { struct qe_softc *sc = arg; struct qe_cdata *qc = sc->sc_qedata; struct ifnet *ifp = &sc->sc_if; struct ether_header *eh; struct mbuf *m; int csr, status1, status2, len; csr = QE_RCSR(QE_CSR_CSR); QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ILOOP); if (csr & QE_RCV_INT) while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) { status1 = qc->qc_recv[sc->sc_nextrx].qe_status1; status2 = qc->qc_recv[sc->sc_nextrx].qe_status2; m = sc->sc_rxmbuf[sc->sc_nextrx]; len = ((status1 & QE_RBL_HI) | (status2 & QE_RBL_LO)) + 60; qe_add_rxbuf(sc, sc->sc_nextrx); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; if (++sc->sc_nextrx == RXDESCS) sc->sc_nextrx = 0; eh = mtod(m, struct ether_header *); #if NBPFILTER > 0 if (ifp->if_bpf) { bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); if ((ifp->if_flags & IFF_PROMISC) != 0 && bcmp(sc->sc_ac.ac_enaddr, eh->ether_dhost, ETHER_ADDR_LEN) != 0 && ((eh->ether_dhost[0] & 1) == 0)) { m_freem(m); continue; } } #endif /* * ALLMULTI means PROMISC in this driver. */ if ((ifp->if_flags & IFF_ALLMULTI) && ((eh->ether_dhost[0] & 1) == 0) && bcmp(sc->sc_ac.ac_enaddr, eh->ether_dhost, ETHER_ADDR_LEN)) { m_freem(m); continue; } if ((status1 & QE_ESETUP) == 0) ether_input_mbuf(ifp, m); else m_freem(m); } if (csr & (QE_XMIT_INT|QE_XL_INVALID)) { while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) { int idx = sc->sc_lastack; sc->sc_inq--; if (++sc->sc_lastack == TXDESCS) sc->sc_lastack = 0; /* XXX collect statistics */ qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID; qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET; if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP) continue; bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]); if (sc->sc_txmbuf[idx]) { m_freem(sc->sc_txmbuf[idx]); sc->sc_txmbuf[idx] = 0; } } ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; qestart(ifp); /* Put in more in queue */ } /* * How can the receive list get invalid??? * Verified that it happens anyway. */ if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) && (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) { QE_WCSR(QE_CSR_RCLL, LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx])); QE_WCSR(QE_CSR_RCLH, HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx])); } }
/* * Massage IPv4/IPv6 headers for AH processing. */ static int ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out) { struct mbuf *m = *m0; unsigned char *ptr; int off, count; #ifdef INET struct ip *ip; #endif /* INET */ #ifdef INET6 struct ip6_ext *ip6e; struct ip6_hdr ip6; int alloc, ad, nxt; #endif /* INET6 */ switch (proto) { #ifdef INET case AF_INET: /* * This is the least painful way of dealing with IPv4 header * and option processing -- just make sure they're in * contiguous memory. */ *m0 = m = m_pullup(m, skip); if (m == NULL) { DPRINTF(("ah_massage_headers: m_pullup failed\n")); return ENOBUFS; } /* Fix the IP header */ ip = mtod(m, struct ip *); if (ip4_ah_cleartos) ip->ip_tos = 0; ip->ip_ttl = 0; ip->ip_sum = 0; ip->ip_off = htons(ntohs(ip->ip_off) & ip4_ah_offsetmask); /* * On FreeBSD, ip_off and ip_len assumed in host endian; * they are converted (if necessary) by ip_input(). * On NetBSD, ip_off and ip_len are in network byte order. * They must be massaged back to network byte order * before verifying the HMAC. Moreover, on FreeBSD, * we should add `skip' back into the massaged ip_len * (presumably ip_input() deducted it before we got here?) * whereas on NetBSD, we should not. */ #ifdef __FreeBSD__ #define TOHOST(x) (x) #else #define TOHOST(x) (ntohs(x)) #endif if (!out) { u_int16_t inlen = TOHOST(ip->ip_len); #ifdef __FreeBSD__ ip->ip_len = htons(inlen + skip); #else /*!__FreeBSD__ */ ip->ip_len = htons(inlen); #endif /*!__FreeBSD__ */ if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK) ip->ip_off &= IP_OFF_CONVERT(IP_DF); else ip->ip_off = 0; } else { if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK) ip->ip_off &= IP_OFF_CONVERT(IP_DF); else ip->ip_off = 0; } ptr = mtod(m, unsigned char *); /* IPv4 option processing */ for (off = sizeof(struct ip); off < skip;) { if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP || off + 1 < skip) ; else { DPRINTF(("ah_massage_headers: illegal IPv4 " "option length for option %d\n", ptr[off])); m_freem(m); return EINVAL; } switch (ptr[off]) { case IPOPT_EOL: off = skip; /* End the loop. */ break; case IPOPT_NOP: off++; break; case IPOPT_SECURITY: /* 0x82 */ case 0x85: /* Extended security. */ case 0x86: /* Commercial security. */ case 0x94: /* Router alert */ case 0x95: /* RFC1770 */ /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("ah_massage_headers: " "illegal IPv4 option length for " "option %d\n", ptr[off])); m_freem(m); return EINVAL; } off += ptr[off + 1]; break; case IPOPT_LSRR: case IPOPT_SSRR: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("ah_massage_headers: " "illegal IPv4 option length for " "option %d\n", ptr[off])); m_freem(m); return EINVAL; } /* * On output, if we have either of the * source routing options, we should * swap the destination address of the * IP header with the last address * specified in the option, as that is * what the destination's IP header * will look like. */ if (out) bcopy(ptr + off + ptr[off + 1] - sizeof(struct in_addr), &(ip->ip_dst), sizeof(struct in_addr)); /* Fall through */ default: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("ah_massage_headers: " "illegal IPv4 option length for " "option %d\n", ptr[off])); m_freem(m); return EINVAL; } /* Zeroize all other options. */ count = ptr[off + 1]; memcpy(ptr + off, ipseczeroes, count); off += count; break; } /* Sanity check. 
*/ if (off > skip) { DPRINTF(("ah_massage_headers(): malformed " "IPv4 options header\n")); m_freem(m); return EINVAL; } } break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Ugly... */ /* Copy and "cook" the IPv6 header. */ m_copydata(m, 0, sizeof(ip6), &ip6); /* We don't do IPv6 Jumbograms. */ if (ip6.ip6_plen == 0) { DPRINTF(("ah_massage_headers: unsupported IPv6 jumbogram\n")); m_freem(m); return EMSGSIZE; } ip6.ip6_flow = 0; ip6.ip6_hlim = 0; ip6.ip6_vfc &= ~IPV6_VERSION_MASK; ip6.ip6_vfc |= IPV6_VERSION; /* Scoped address handling. */ if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_src)) ip6.ip6_src.s6_addr16[1] = 0; if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_dst)) ip6.ip6_dst.s6_addr16[1] = 0; /* Done with IPv6 header. */ m_copyback(m, 0, sizeof(struct ip6_hdr), &ip6); /* Let's deal with the remaining headers (if any). */ if (skip - sizeof(struct ip6_hdr) > 0) { if (m->m_len <= skip) { ptr = (unsigned char *) malloc( skip - sizeof(struct ip6_hdr), M_XDATA, M_NOWAIT); if (ptr == NULL) { DPRINTF(("ah_massage_headers: failed " "to allocate memory for IPv6 " "headers\n")); m_freem(m); return ENOBUFS; } /* * Copy all the protocol headers after * the IPv6 header. */ m_copydata(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); alloc = 1; } else { /* No need to allocate memory. */ ptr = mtod(m, unsigned char *) + sizeof(struct ip6_hdr); alloc = 0; } } else break; nxt = ip6.ip6_nxt & 0xff; /* Next header type. */ for (off = 0; off < skip - sizeof(struct ip6_hdr);) switch (nxt) { case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: ip6e = (struct ip6_ext *) (ptr + off); /* * Process the mutable/immutable * options -- borrows heavily from the * KAME code. */ for (count = off + sizeof(struct ip6_ext); count < off + ((ip6e->ip6e_len + 1) << 3);) { if (ptr[count] == IP6OPT_PAD1) { count++; continue; /* Skip padding. */ } /* Sanity check. */ if (count > off + ((ip6e->ip6e_len + 1) << 3)) { m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } ad = ptr[count + 1]; /* If mutable option, zeroize. */ if (ptr[count] & IP6OPT_MUTABLE) memcpy(ptr + count, ipseczeroes, ptr[count + 1]); count += ad; /* Sanity check. */ if (count > skip - sizeof(struct ip6_hdr)) { m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } } /* Advance. */ off += ((ip6e->ip6e_len + 1) << 3); nxt = ip6e->ip6e_nxt; break; case IPPROTO_ROUTING: /* * Always include routing headers in * computation. */ { struct ip6_rthdr *rh; ip6e = (struct ip6_ext *) (ptr + off); rh = (struct ip6_rthdr *)(ptr + off); /* * must adjust content to make it look like * its final form (as seen at the final * destination). * we only know how to massage type 0 routing * header. */ if (out && rh->ip6r_type == IPV6_RTHDR_TYPE_0) { struct ip6_rthdr0 *rh0; struct in6_addr *addr, finaldst; int i; rh0 = (struct ip6_rthdr0 *)rh; addr = (struct in6_addr *)(rh0 + 1); for (i = 0; i < rh0->ip6r0_segleft; i++) in6_clearscope(&addr[i]); finaldst = addr[rh0->ip6r0_segleft - 1]; memmove(&addr[1], &addr[0], sizeof(struct in6_addr) * (rh0->ip6r0_segleft - 1)); m_copydata(m, 0, sizeof(ip6), &ip6); addr[0] = ip6.ip6_dst; ip6.ip6_dst = finaldst; m_copyback(m, 0, sizeof(ip6), &ip6); rh0->ip6r0_segleft = 0; } /* advance */ off += ((ip6e->ip6e_len + 1) << 3); nxt = ip6e->ip6e_nxt; break; } default: DPRINTF(("ah_massage_headers: unexpected " "IPv6 header type %d", off)); if (alloc) free(ptr, M_XDATA); m_freem(m); return EINVAL; } /* Copyback and free, if we allocated. 
*/ if (alloc) { m_copyback(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); free(ptr, M_XDATA); } break; #endif /* INET6 */ }
void mec_start(struct ifnet *ifp) { struct mec_softc *sc = ifp->if_softc; struct mbuf *m0; struct mec_txdesc *txd; struct mec_txsoft *txs; bus_dmamap_t dmamap; bus_space_tag_t st = sc->sc_st; bus_space_handle_t sh = sc->sc_sh; uint64_t txdaddr; int error, firsttx, nexttx, opending; int len, bufoff, buflen, unaligned, txdlen; if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) return; /* * Remember the previous txpending and the first transmit descriptor. */ opending = sc->sc_txpending; firsttx = MEC_NEXTTX(sc->sc_txlast); DPRINTF(MEC_DEBUG_START, ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx)); for (;;) { /* Grab a packet off the queue. */ IFQ_POLL(&ifp->if_snd, m0); if (m0 == NULL) break; if (sc->sc_txpending == MEC_NTXDESC) { break; } /* * Get the next available transmit descriptor. */ nexttx = MEC_NEXTTX(sc->sc_txlast); txd = &sc->sc_txdesc[nexttx]; txs = &sc->sc_txsoft[nexttx]; buflen = 0; bufoff = 0; txdaddr = 0; /* XXX gcc */ txdlen = 0; /* XXX gcc */ len = m0->m_pkthdr.len; DPRINTF(MEC_DEBUG_START, ("mec_start: len = %d, nexttx = %d\n", len, nexttx)); IFQ_DEQUEUE(&ifp->if_snd, m0); if (len < ETHER_PAD_LEN) { /* * I don't know if MEC chip does auto padding, * so if the packet is small enough, * just copy it to the buffer in txdesc. * Maybe this is the simple way. */ DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n")); bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN); m_copydata(m0, 0, m0->m_pkthdr.len, txd->txd_buf + bufoff); memset(txd->txd_buf + bufoff + len, 0, ETHER_PAD_LEN - len); len = buflen = ETHER_PAD_LEN; txs->txs_flags = MEC_TXS_TXDBUF | buflen; } else { /* * If the packet won't fit the buffer in txdesc, * we have to use concatinate pointer to handle it. * While MEC can handle up to three segments to * concatinate, MEC requires that both the second and * third segments have to be 8 byte aligned. * Since it's unlikely for mbuf clusters, we use * only the first concatinate pointer. If the packet * doesn't fit in one DMA segment, allocate new mbuf * and copy the packet to it. * * Besides, if the start address of the first segments * is not 8 byte aligned, such part have to be copied * to the txdesc buffer. (XXX see below comments) */ DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n")); dmamap = txs->txs_dmamap; if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) { struct mbuf *m; DPRINTF(MEC_DEBUG_START, ("mec_start: re-allocating mbuf\n")); MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { printf("%s: unable to allocate " "TX mbuf\n", sc->sc_dev.dv_xname); break; } if (len > (MHLEN - ETHER_ALIGN)) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { printf("%s: unable to allocate " "TX cluster\n", sc->sc_dev.dv_xname); m_freem(m); break; } } /* * Each packet has the Ethernet header, so * in many case the header isn't 4-byte aligned * and data after the header is 4-byte aligned. * Thus adding 2-byte offset before copying to * new mbuf avoids unaligned copy and this may * improve some performance. * As noted above, unaligned part has to be * copied to txdesc buffer so this may cause * extra copy ops, but for now MEC always * requires some data in txdesc buffer, * so we always have to copy some data anyway. 
*/ m->m_data += ETHER_ALIGN; m_copydata(m0, 0, len, mtod(m, caddr_t)); m->m_pkthdr.len = m->m_len = len; m_freem(m0); m0 = m; error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT); if (error) { printf("%s: unable to load TX buffer, " "error = %d\n", sc->sc_dev.dv_xname, error); m_freem(m); break; } } /* handle unaligned part */ txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr); txs->txs_flags = MEC_TXS_TXDPTR1; unaligned = dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1); DPRINTF(MEC_DEBUG_START, ("mec_start: ds_addr = 0x%x, unaligned = %d\n", (u_int)dmamap->dm_segs[0].ds_addr, unaligned)); if (unaligned != 0) { buflen = MEC_TXD_ALIGN - unaligned; bufoff = MEC_TXD_BUFSTART(buflen); DPRINTF(MEC_DEBUG_START, ("mec_start: unaligned, " "buflen = %d, bufoff = %d\n", buflen, bufoff)); memcpy(txd->txd_buf + bufoff, mtod(m0, caddr_t), buflen); txs->txs_flags |= MEC_TXS_TXDBUF | buflen; } #if 1 else { /* * XXX needs hardware info XXX * It seems MEC always requires some data * in txd_buf[] even if buffer is * 8-byte aligned otherwise DMA abort error * occurs later... */ buflen = MEC_TXD_ALIGN; bufoff = MEC_TXD_BUFSTART(buflen); memcpy(txd->txd_buf + bufoff, mtod(m0, caddr_t), buflen); DPRINTF(MEC_DEBUG_START, ("mec_start: aligned, " "buflen = %d, bufoff = %d\n", buflen, bufoff)); txs->txs_flags |= MEC_TXS_TXDBUF | buflen; txdaddr += MEC_TXD_ALIGN; } #endif txdlen = len - buflen; DPRINTF(MEC_DEBUG_START, ("mec_start: txdaddr = 0x%llx, txdlen = %d\n", txdaddr, txdlen)); /* * sync the DMA map for TX mbuf * * XXX unaligned part doesn't have to be sync'ed, * but it's harmless... */ bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); } #if NBPFILTER > 0 /* * Pass packet to bpf if there is a listener. */ if (ifp->if_bpf) bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT); #endif /* * setup the transmit descriptor. */ /* TXINT bit will be set later on the last packet */ txd->txd_cmd = (len - 1); /* but also set TXINT bit on a half of TXDESC */ if (sc->sc_txpending == (MEC_NTXDESC / 2)) txd->txd_cmd |= MEC_TXCMD_TXINT; if (txs->txs_flags & MEC_TXS_TXDBUF) txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen); if (txs->txs_flags & MEC_TXS_TXDPTR1) { txd->txd_cmd |= MEC_TXCMD_PTR1; txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr; /* * Store a pointer to the packet so we can * free it later. */ txs->txs_mbuf = m0; } else { txd->txd_ptr[0] = 0; /* * In this case all data are copied to buffer in txdesc, * we can free TX mbuf here. */ m_freem(m0); } DPRINTF(MEC_DEBUG_START, ("mec_start: txd_cmd = 0x%llx, txd_ptr = 0x%llx\n", txd->txd_cmd, txd->txd_ptr[0])); DPRINTF(MEC_DEBUG_START, ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n", len, len, buflen, buflen)); /* sync TX descriptor */ MEC_TXDESCSYNC(sc, nexttx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); /* advance the TX pointer. */ sc->sc_txpending++; sc->sc_txlast = nexttx; } if (sc->sc_txpending == MEC_NTXDESC) { /* No more slots; notify upper layer. */ ifp->if_flags |= IFF_OACTIVE; } if (sc->sc_txpending != opending) { /* * Cause a TX interrupt to happen on the last packet * we enqueued. */ sc->sc_txdesc[sc->sc_txlast].txd_cmd |= MEC_TXCMD_TXINT; MEC_TXCMDSYNC(sc, sc->sc_txlast, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); /* start TX */ bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(sc->sc_txlast)); /* * If the transmitter was idle, * reset the txdirty pointer and reenable TX interrupt. 
*/ if (opending == 0) { sc->sc_txdirty = firsttx; bus_space_write_8(st, sh, MEC_TX_ALIAS, MEC_TX_ALIAS_INT_ENABLE); } /* Set a watchdog timer in case the chip flakes out. */ ifp->if_timer = 5; } }
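/*
 * Illustrative sketch with hypothetical names: the split mec_start()
 * performs for a long packet.  The chip's concatenate pointer must be
 * 8-byte aligned, so the bytes before the next 8-byte boundary are
 * copied into the descriptor's own buffer and only the aligned
 * remainder is described by the DMA pointer.  (The driver additionally
 * copies a full 8 bytes even when already aligned, as its XXX comment
 * above explains.)
 */
#include <stdint.h>
#include <string.h>

#define DMA_ALIGN	8u

struct tx_split {			/* hypothetical result of the split */
	unsigned	copy_len;	/* bytes copied into the descriptor */
	uintptr_t	dma_addr;	/* 8-byte aligned pointer address */
};

static struct tx_split
split_for_dma(uintptr_t buf_addr, unsigned char *desc_buf,
    const unsigned char *pkt)
{
	struct tx_split s;
	unsigned unaligned = buf_addr & (DMA_ALIGN - 1);

	s.copy_len = unaligned ? DMA_ALIGN - unaligned : 0;
	s.dma_addr = buf_addr + s.copy_len;	/* rounded up to 8 bytes */
	memcpy(desc_buf, pkt, s.copy_len);	/* leading unaligned bytes */
	return s;
}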
void mec_txintr(struct mec_softc *sc, uint32_t stat) { struct ifnet *ifp = &sc->sc_ac.ac_if; struct mec_txdesc *txd; struct mec_txsoft *txs; bus_dmamap_t dmamap; uint64_t txstat; int i, last; u_int col; ifp->if_flags &= ~IFF_OACTIVE; DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: called\n")); bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_TX_ALIAS, 0); last = (stat & MEC_INT_TX_RING_BUFFER_ALIAS) >> 16; DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: dirty %d last %d\n", sc->sc_txdirty, last)); for (i = sc->sc_txdirty; i != last && sc->sc_txpending != 0; i = MEC_NEXTTX(i), sc->sc_txpending--) { txd = &sc->sc_txdesc[i]; MEC_TXDESCSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); txstat = txd->txd_stat; DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: dirty = %d, txstat = 0x%llx\n", i, txstat)); if ((txstat & MEC_TXSTAT_SENT) == 0) { MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD); break; } if ((txstat & MEC_TXSTAT_SUCCESS) == 0) { printf("%s: TX error: txstat = 0x%llx\n", sc->sc_dev.dv_xname, txstat); ifp->if_oerrors++; continue; } txs = &sc->sc_txsoft[i]; if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) { dmamap = txs->txs_dmamap; bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, dmamap); m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT; ifp->if_collisions += col; ifp->if_opackets++; } /* update the dirty TX buffer pointer */ sc->sc_txdirty = i; DPRINTF(MEC_DEBUG_INTR, ("mec_txintr: sc_txdirty = %2d, sc_txpending = %2d\n", sc->sc_txdirty, sc->sc_txpending)); /* cancel the watchdog timer if there are no pending TX packets */ if (sc->sc_txpending == 0) ifp->if_timer = 0; else if (!(stat & MEC_INT_TX_EMPTY)) bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_TX_ALIAS, MEC_TX_ALIAS_INT_ENABLE); }
static void
smc_start_locked(struct ifnet *ifp)
{
	struct smc_softc	*sc;
	struct mbuf		*m;
	u_int			len, npages, spin_count;

	sc = ifp->if_softc;
	SMC_ASSERT_LOCKED(sc);

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * Grab the next packet.  If it's too big, drop it.
	 */
	IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
	len = m_length(m, NULL);
	len += (len & 1);
	if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		if_printf(ifp, "large packet discarded\n");
		++ifp->if_oerrors;
		m_freem(m);
		return;		/* XXX readcheck? */
	}

	/*
	 * Flag that we're busy.
	 */
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	sc->smc_pending = m;

	/*
	 * Work out how many 256 byte "pages" we need.  We have to include the
	 * control data for the packet in this calculation.
	 */
	npages = (len + PKT_CTRL_DATA_LEN) >> 8;
	if (npages == 0)
		npages = 1;

	/*
	 * Request memory.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages);

	/*
	 * Spin briefly to see if the allocation succeeds.
	 */
	spin_count = TX_ALLOC_WAIT_TIME;
	do {
		if (smc_read_1(sc, IST) & ALLOC_INT) {
			smc_write_1(sc, ACK, ALLOC_INT);
			break;
		}
	} while (--spin_count);

	/*
	 * If the allocation is taking too long, unmask the alloc interrupt
	 * and wait.
	 */
	if (spin_count == 0) {
		sc->smc_mask |= ALLOC_INT;
		if ((ifp->if_capenable & IFCAP_POLLING) == 0)
			smc_write_1(sc, MSK, sc->smc_mask);
		return;
	}

	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
}
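/*
 * Illustrative sketch (not driver code): the allocation path in
 * smc_start_locked() above uses a common pattern -- spin briefly waiting for
 * a completion flag, and if it does not show up within the spin budget,
 * unmask the corresponding interrupt and let the interrupt/task path finish
 * the work.  The helpers below are hypothetical stand-ins for the register
 * accesses; only the control flow mirrors the driver.
 */
#include <stdbool.h>
#include <stdio.h>

#define ALLOC_WAIT_SPINS	8		/* assumed spin budget */

static int fake_status;				/* stands in for the status register */

static bool
poll_alloc_done(void)				/* hypothetical status poll */
{
	return (fake_status++ > 3);		/* pretend it completes eventually */
}

static void
enable_alloc_interrupt(void)			/* hypothetical IRQ unmask */
{
	printf("slow path: waiting for the allocation interrupt\n");
}

static bool
wait_for_alloc(void)
{
	unsigned spins = ALLOC_WAIT_SPINS;

	do {
		if (poll_alloc_done())
			return true;		/* fast path: done while spinning */
	} while (--spins);

	enable_alloc_interrupt();		/* give up spinning, use the ISR */
	return false;
}

int
main(void)
{
	printf("allocation %s while spinning\n",
	    wait_for_alloc() ? "completed" : "did not complete");
	return 0;
}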
int udp6_input(struct mbuf **mp, int *offp, int proto) { struct mbuf *m = *mp; struct ifnet *ifp; struct ip6_hdr *ip6; struct udphdr *uh; struct inpcb *inp; struct inpcbinfo *pcbinfo; struct udpcb *up; int off = *offp; int cscov_partial; int plen, ulen; struct sockaddr_in6 fromsa; struct m_tag *fwd_tag; uint16_t uh_sum; uint8_t nxt; ifp = m->m_pkthdr.rcvif; ip6 = mtod(m, struct ip6_hdr *); #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, off, sizeof(struct udphdr), IPPROTO_DONE); ip6 = mtod(m, struct ip6_hdr *); uh = (struct udphdr *)((caddr_t)ip6 + off); #else IP6_EXTHDR_GET(uh, struct udphdr *, m, off, sizeof(*uh)); if (!uh) return (IPPROTO_DONE); #endif UDPSTAT_INC(udps_ipackets); /* * Destination port of 0 is illegal, based on RFC768. */ if (uh->uh_dport == 0) goto badunlocked; plen = ntohs(ip6->ip6_plen) - off + sizeof(*ip6); ulen = ntohs((u_short)uh->uh_ulen); nxt = ip6->ip6_nxt; cscov_partial = (nxt == IPPROTO_UDPLITE) ? 1 : 0; if (nxt == IPPROTO_UDPLITE) { /* Zero means checksum over the complete packet. */ if (ulen == 0) ulen = plen; if (ulen == plen) cscov_partial = 0; if ((ulen < sizeof(struct udphdr)) || (ulen > plen)) { /* XXX: What is the right UDPLite MIB counter? */ goto badunlocked; } if (uh->uh_sum == 0) { /* XXX: What is the right UDPLite MIB counter? */ goto badunlocked; } } else { if ((ulen < sizeof(struct udphdr)) || (plen != ulen)) { UDPSTAT_INC(udps_badlen); goto badunlocked; } if (uh->uh_sum == 0) { UDPSTAT_INC(udps_nosum); goto badunlocked; } } if ((m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) && !cscov_partial) { if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) uh_sum = m->m_pkthdr.csum_data; else uh_sum = in6_cksum_pseudo(ip6, ulen, nxt, m->m_pkthdr.csum_data); uh_sum ^= 0xffff; } else uh_sum = in6_cksum_partial(m, nxt, off, plen, ulen); if (uh_sum != 0) { UDPSTAT_INC(udps_badsum); goto badunlocked; } /* * Construct sockaddr format source address. */ init_sin6(&fromsa, m); fromsa.sin6_port = uh->uh_sport; pcbinfo = get_inpcbinfo(nxt); if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { struct inpcb *last; struct inpcbhead *pcblist; struct ip6_moptions *imo; INP_INFO_RLOCK(pcbinfo); /* * In the event that laddr should be set to the link-local * address (this happens in RIPng), the multicast address * specified in the received packet will not match laddr. To * handle this situation, matching is relaxed if the * receiving interface is the same as one specified in the * socket and if the destination multicast address matches * one of the multicast groups specified in the socket. */ /* * KAME note: traditionally we dropped udpiphdr from mbuf * here. We need udphdr for IPsec processing so we do that * later. */ pcblist = get_pcblist(nxt); last = NULL; LIST_FOREACH(inp, pcblist, inp_list) { if ((inp->inp_vflag & INP_IPV6) == 0) continue; if (inp->inp_lport != uh->uh_dport) continue; if (inp->inp_fport != 0 && inp->inp_fport != uh->uh_sport) continue; if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, &ip6->ip6_dst)) continue; } if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, &ip6->ip6_src) || inp->inp_fport != uh->uh_sport) continue; } /* * XXXRW: Because we weren't holding either the inpcb * or the hash lock when we checked for a match * before, we should probably recheck now that the * inpcb lock is (supposed to be) held. */ /* * Handle socket delivery policy for any-source * and source-specific multicast. 
[RFC3678] */ imo = inp->in6p_moptions; if (imo && IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { struct sockaddr_in6 mcaddr; int blocked; INP_RLOCK(inp); bzero(&mcaddr, sizeof(struct sockaddr_in6)); mcaddr.sin6_len = sizeof(struct sockaddr_in6); mcaddr.sin6_family = AF_INET6; mcaddr.sin6_addr = ip6->ip6_dst; blocked = im6o_mc_filter(imo, ifp, (struct sockaddr *)&mcaddr, (struct sockaddr *)&fromsa); if (blocked != MCAST_PASS) { if (blocked == MCAST_NOTGMEMBER) IP6STAT_INC(ip6s_notmember); if (blocked == MCAST_NOTSMEMBER || blocked == MCAST_MUTED) UDPSTAT_INC(udps_filtermcast); INP_RUNLOCK(inp); /* XXX */ continue; } INP_RUNLOCK(inp); } if (last != NULL) { struct mbuf *n; if ((n = m_copy(m, 0, M_COPYALL)) != NULL) { INP_RLOCK(last); UDP_PROBE(receive, NULL, last, ip6, last, uh); udp6_append(last, n, off, &fromsa); INP_RUNLOCK(last); } } last = inp; /* * Don't look for additional matches if this one does * not have either the SO_REUSEPORT or SO_REUSEADDR * socket options set. This heuristic avoids * searching through all pcbs in the common case of a * non-shared port. It assumes that an application * will never clear these options after setting them. */ if ((last->inp_socket->so_options & (SO_REUSEPORT|SO_REUSEADDR)) == 0) break; } if (last == NULL) { /* * No matching pcb found; discard datagram. (No need * to send an ICMP Port Unreachable for a broadcast * or multicast datgram.) */ UDPSTAT_INC(udps_noport); UDPSTAT_INC(udps_noportmcast); goto badheadlocked; } INP_RLOCK(last); INP_INFO_RUNLOCK(pcbinfo); UDP_PROBE(receive, NULL, last, ip6, last, uh); udp6_append(last, m, off, &fromsa); INP_RUNLOCK(last); return (IPPROTO_DONE); } /* * Locate pcb for datagram. */ /* * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */ if ((m->m_flags & M_IP6_NEXTHOP) && (fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL) { struct sockaddr_in6 *next_hop6; next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1); /* * Transparently forwarded. Pretend to be the destination. * Already got one like this? */ inp = in6_pcblookup_mbuf(pcbinfo, &ip6->ip6_src, uh->uh_sport, &ip6->ip6_dst, uh->uh_dport, INPLOOKUP_RLOCKPCB, m->m_pkthdr.rcvif, m); if (!inp) { /* * It's new. Try to find the ambushing socket. * Because we've rewritten the destination address, * any hardware-generated hash is ignored. */ inp = in6_pcblookup(pcbinfo, &ip6->ip6_src, uh->uh_sport, &next_hop6->sin6_addr, next_hop6->sin6_port ? htons(next_hop6->sin6_port) : uh->uh_dport, INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, m->m_pkthdr.rcvif); } /* Remove the tag from the packet. We don't need it anymore. 
*/ m_tag_delete(m, fwd_tag); m->m_flags &= ~M_IP6_NEXTHOP; } else inp = in6_pcblookup_mbuf(pcbinfo, &ip6->ip6_src, uh->uh_sport, &ip6->ip6_dst, uh->uh_dport, INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, m->m_pkthdr.rcvif, m); if (inp == NULL) { if (udp_log_in_vain) { char ip6bufs[INET6_ADDRSTRLEN]; char ip6bufd[INET6_ADDRSTRLEN]; log(LOG_INFO, "Connection attempt to UDP [%s]:%d from [%s]:%d\n", ip6_sprintf(ip6bufd, &ip6->ip6_dst), ntohs(uh->uh_dport), ip6_sprintf(ip6bufs, &ip6->ip6_src), ntohs(uh->uh_sport)); } UDPSTAT_INC(udps_noport); if (m->m_flags & M_MCAST) { printf("UDP6: M_MCAST is set in a unicast packet.\n"); UDPSTAT_INC(udps_noportmcast); goto badunlocked; } if (V_udp_blackhole) goto badunlocked; if (badport_bandlim(BANDLIM_ICMP6_UNREACH) < 0) goto badunlocked; icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOPORT, 0); return (IPPROTO_DONE); } INP_RLOCK_ASSERT(inp); up = intoudpcb(inp); if (cscov_partial) { if (up->u_rxcslen == 0 || up->u_rxcslen > ulen) { INP_RUNLOCK(inp); m_freem(m); return (IPPROTO_DONE); } } UDP_PROBE(receive, NULL, inp, ip6, inp, uh); udp6_append(inp, m, off, &fromsa); INP_RUNLOCK(inp); return (IPPROTO_DONE); badheadlocked: INP_INFO_RUNLOCK(pcbinfo); badunlocked: if (m) m_freem(m); return (IPPROTO_DONE); }
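/*
 * Illustrative sketch (not kernel code): the UDP-Lite handling near the top
 * of udp6_input() follows a few rules from RFC 3828 -- a coverage (uh_ulen)
 * of zero means "checksum over the whole datagram", the coverage may be
 * neither shorter than the UDP-Lite header nor longer than the IPv6 payload,
 * and a zero checksum is never valid.  Pulled out of the kernel context, that
 * validation looks roughly like this:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UDP_HDR_LEN	8	/* size of the UDP/UDP-Lite header */

/*
 * plen: IPv6 payload length, ulen: coverage field from the header,
 * sum: checksum field.  On success, *coverage holds the effective number
 * of bytes the checksum must cover.
 */
static bool
udplite_rx_ok(uint32_t plen, uint32_t ulen, uint16_t sum, uint32_t *coverage)
{
	if (sum == 0)			/* checksum is mandatory for UDP-Lite */
		return false;
	if (ulen == 0)			/* zero means full coverage */
		ulen = plen;
	if (ulen < UDP_HDR_LEN || ulen > plen)
		return false;		/* must cover the header, fit the payload */
	*coverage = ulen;
	return true;
}

int
main(void)
{
	uint32_t cov;

	printf("ulen=0: %s\n", udplite_rx_ok(100, 0, 0xbeef, &cov) ? "ok" : "bad");
	printf("ulen=4: %s\n", udplite_rx_ok(100, 4, 0xbeef, &cov) ? "ok" : "bad");
	return 0;
}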
static void
smc_task_tx(void *context, int pending)
{
	struct ifnet		*ifp;
	struct smc_softc	*sc;
	struct mbuf		*m, *m0;
	u_int			packet, len;
	int			last_len;
	uint8_t			*data;

	(void)pending;
	ifp = (struct ifnet *)context;
	sc = ifp->if_softc;

	SMC_LOCK(sc);

	if (sc->smc_pending == NULL) {
		SMC_UNLOCK(sc);
		goto next_packet;
	}

	m = m0 = sc->smc_pending;
	sc->smc_pending = NULL;
	smc_select_bank(sc, 2);

	/*
	 * Check the allocation result.
	 */
	packet = smc_read_1(sc, ARR);

	/*
	 * If the allocation failed, requeue the packet and retry.
	 */
	if (packet & ARR_FAILED) {
		IFQ_DRV_PREPEND(&ifp->if_snd, m);
		++ifp->if_oerrors;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		smc_start_locked(ifp);
		SMC_UNLOCK(sc);
		return;
	}

	/*
	 * Tell the device to write to our packet number.
	 */
	smc_write_1(sc, PNR, packet);
	smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR);

	/*
	 * Tell the device how long the packet is (including control data).
	 */
	len = m_length(m, NULL);
	len += PKT_CTRL_DATA_LEN;
	smc_write_2(sc, DATA0, 0);
	smc_write_2(sc, DATA0, len);

	/*
	 * Push the data out to the device.
	 */
	data = NULL;
	last_len = 0;
	for (; m != NULL; m = m->m_next) {
		data = mtod(m, uint8_t *);
		smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2);
		last_len = m->m_len;
	}

	/*
	 * Push out the control byte and the odd byte if needed.
	 */
	if ((len & 1) != 0 && data != NULL)
		smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[last_len - 1]);
	else
		smc_write_2(sc, DATA0, 0);

	/*
	 * Unmask the TX empty interrupt.
	 */
	sc->smc_mask |= TX_EMPTY_INT;
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	/*
	 * Enqueue the packet.
	 */
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE);
	callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc);

	/*
	 * Finish up.
	 */
	ifp->if_opackets++;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	SMC_UNLOCK(sc);
	BPF_MTAP(ifp, m0);
	m_freem(m0);

next_packet:
	/*
	 * See if there's anything else to do.
	 */
	smc_start(ifp);
}
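/*
 * Illustrative sketch (not driver code): smc_task_tx() above pushes the frame
 * to the chip 16 bits at a time and finishes with a control word.  When the
 * frame length is odd, the final data byte rides in the low half of that
 * control word together with an "odd byte present" flag (CTRL_ODD in the
 * driver) in the high half.  A minimal sketch of the packing, with an assumed
 * flag value standing in for CTRL_ODD:
 */
#include <stdint.h>
#include <stdio.h>

#define ODD_FLAG	0x20	/* assumed stand-in for the driver's CTRL_ODD */

static uint16_t
tx_control_word(const uint8_t *frame, unsigned len)
{
	if (len & 1)			/* odd length: last byte + odd flag */
		return (uint16_t)((ODD_FLAG << 8) | frame[len - 1]);
	return 0;			/* even length: plain control word */
}

int
main(void)
{
	uint8_t frame[61] = { 0 };

	frame[60] = 0xab;
	printf("control word for 61-byte frame: 0x%04x\n",
	    tx_control_word(frame, sizeof(frame)));
	return 0;
}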
static int udp6_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr6, struct mbuf *control, struct thread *td) { u_int32_t ulen = m->m_pkthdr.len; u_int32_t plen = sizeof(struct udphdr) + ulen; struct ip6_hdr *ip6; struct udphdr *udp6; struct in6_addr *laddr, *faddr, in6a; struct sockaddr_in6 *sin6 = NULL; struct ifnet *oifp = NULL; int cscov_partial = 0; int scope_ambiguous = 0; u_short fport; int error = 0; uint8_t nxt; uint16_t cscov = 0; struct ip6_pktopts *optp, opt; int af = AF_INET6, hlen = sizeof(struct ip6_hdr); int flags; struct sockaddr_in6 tmp; INP_WLOCK_ASSERT(inp); INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); if (addr6) { /* addr6 has been validated in udp6_send(). */ sin6 = (struct sockaddr_in6 *)addr6; /* protect *sin6 from overwrites */ tmp = *sin6; sin6 = &tmp; /* * Application should provide a proper zone ID or the use of * default zone IDs should be enabled. Unfortunately, some * applications do not behave as it should, so we need a * workaround. Even if an appropriate ID is not determined, * we'll see if we can determine the outgoing interface. If we * can, determine the zone ID based on the interface below. */ if (sin6->sin6_scope_id == 0 && !V_ip6_use_defzone) scope_ambiguous = 1; if ((error = sa6_embedscope(sin6, V_ip6_use_defzone)) != 0) return (error); } if (control) { if ((error = ip6_setpktopts(control, &opt, inp->in6p_outputopts, td->td_ucred, IPPROTO_UDP)) != 0) goto release; optp = &opt; } else optp = inp->in6p_outputopts; if (sin6) { faddr = &sin6->sin6_addr; /* * Since we saw no essential reason for calling in_pcbconnect, * we get rid of such kind of logic, and call in6_selectsrc * and in6_pcbsetport in order to fill in the local address * and the local port. */ if (sin6->sin6_port == 0) { error = EADDRNOTAVAIL; goto release; } if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { /* how about ::ffff:0.0.0.0 case? */ error = EISCONN; goto release; } fport = sin6->sin6_port; /* allow 0 port */ if (IN6_IS_ADDR_V4MAPPED(faddr)) { if ((inp->inp_flags & IN6P_IPV6_V6ONLY)) { /* * I believe we should explicitly discard the * packet when mapped addresses are disabled, * rather than send the packet as an IPv6 one. * If we chose the latter approach, the packet * might be sent out on the wire based on the * default route, the situation which we'd * probably want to avoid. * (20010421 [email protected]) */ error = EINVAL; goto release; } if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) && !IN6_IS_ADDR_V4MAPPED(&inp->in6p_laddr)) { /* * when remote addr is an IPv4-mapped address, * local addr should not be an IPv6 address, * since you cannot determine how to map IPv6 * source address to IPv4. */ error = EINVAL; goto release; } af = AF_INET; } if (!IN6_IS_ADDR_V4MAPPED(faddr)) { error = in6_selectsrc(sin6, optp, inp, NULL, td->td_ucred, &oifp, &in6a); if (error) goto release; if (oifp && scope_ambiguous && (error = in6_setscope(&sin6->sin6_addr, oifp, NULL))) { goto release; } laddr = &in6a; } else laddr = &inp->in6p_laddr; /* XXX */ if (laddr == NULL) { if (error == 0) error = EADDRNOTAVAIL; goto release; } if (inp->inp_lport == 0 && (error = in6_pcbsetport(laddr, inp, td->td_ucred)) != 0) { /* Undo an address bind that may have occurred. 
*/ inp->in6p_laddr = in6addr_any; goto release; } } else { if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { error = ENOTCONN; goto release; } if (IN6_IS_ADDR_V4MAPPED(&inp->in6p_faddr)) { if ((inp->inp_flags & IN6P_IPV6_V6ONLY)) { /* * XXX: this case would happen when the * application sets the V6ONLY flag after * connecting the foreign address. * Such applications should be fixed, * so we bark here. */ log(LOG_INFO, "udp6_output: IPV6_V6ONLY " "option was set for a connected socket\n"); error = EINVAL; goto release; } else af = AF_INET; } laddr = &inp->in6p_laddr; faddr = &inp->in6p_faddr; fport = inp->inp_fport; } if (af == AF_INET) hlen = sizeof(struct ip); /* * Calculate data length and get a mbuf * for UDP and IP6 headers. */ M_PREPEND(m, hlen + sizeof(struct udphdr), M_NOWAIT); if (m == 0) { error = ENOBUFS; goto release; } /* * Stuff checksum and output datagram. */ nxt = (inp->inp_socket->so_proto->pr_protocol == IPPROTO_UDP) ? IPPROTO_UDP : IPPROTO_UDPLITE; udp6 = (struct udphdr *)(mtod(m, caddr_t) + hlen); udp6->uh_sport = inp->inp_lport; /* lport is always set in the PCB */ udp6->uh_dport = fport; if (nxt == IPPROTO_UDPLITE) { struct udpcb *up; up = intoudpcb(inp); cscov = up->u_txcslen; if (cscov >= plen) cscov = 0; udp6->uh_ulen = htons(cscov); /* * For UDP-Lite, checksum coverage length of zero means * the entire UDPLite packet is covered by the checksum. */ cscov_partial = (cscov == 0) ? 0 : 1; } else if (plen <= 0xffff) udp6->uh_ulen = htons((u_short)plen); else udp6->uh_ulen = 0; udp6->uh_sum = 0; switch (af) { case AF_INET6: ip6 = mtod(m, struct ip6_hdr *); ip6->ip6_flow = inp->inp_flow & IPV6_FLOWINFO_MASK; ip6->ip6_vfc &= ~IPV6_VERSION_MASK; ip6->ip6_vfc |= IPV6_VERSION; ip6->ip6_plen = htons((u_short)plen); ip6->ip6_nxt = nxt; ip6->ip6_hlim = in6_selecthlim(inp, NULL); ip6->ip6_src = *laddr; ip6->ip6_dst = *faddr; if (cscov_partial) { if ((udp6->uh_sum = in6_cksum_partial(m, nxt, sizeof(struct ip6_hdr), plen, cscov)) == 0) udp6->uh_sum = 0xffff; } else { udp6->uh_sum = in6_cksum_pseudo(ip6, plen, nxt, 0); m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); } /* * XXX for now assume UDP is 2-tuple. * Later on this may become configurable as 4-tuple; * we should support that. * * XXX .. and we should likely cache this in the inpcb. */ #ifdef RSS m->m_pkthdr.flowid = rss_hash_ip6_2tuple(*faddr, *laddr); M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6); #endif flags = 0; #ifdef RSS /* * Don't override with the inp cached flowid. * * Until the whole UDP path is vetted, it may actually * be incorrect. */ flags |= IP_NODEFAULTFLOWID; #endif UDP_PROBE(send, NULL, inp, ip6, inp, udp6); UDPSTAT_INC(udps_opackets); error = ip6_output(m, optp, NULL, flags, inp->in6p_moptions, NULL, inp); break; case AF_INET: error = EAFNOSUPPORT; goto release; } goto releaseopt; release: m_freem(m); releaseopt: if (control) { ip6_clearpktopts(&opt, -1); m_freem(control); } return (error); }
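/*
 * Illustrative sketch (not kernel code): on the transmit side, udp6_output()
 * above reuses uh_ulen for the checksum coverage when the socket is UDP-Lite.
 * A requested coverage of at least the whole packet collapses to 0 on the
 * wire, which by definition means full coverage.  That mapping in isolation,
 * assuming host-order values:
 */
#include <stdint.h>
#include <stdio.h>

/* Value to place in uh_ulen (before htons) for a UDP-Lite packet. */
static uint16_t
udplite_tx_ulen(uint32_t plen, uint32_t requested_coverage)
{
	if (requested_coverage >= plen)
		return 0;		/* 0 on the wire == full coverage */
	return (uint16_t)requested_coverage;
}

int
main(void)
{
	printf("plen 120, coverage 20  -> uh_ulen %u\n", udplite_tx_ulen(120, 20));
	printf("plen 120, coverage 500 -> uh_ulen %u\n", udplite_tx_ulen(120, 500));
	return 0;
}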
int ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir, struct inpcb *inp) { struct ip_fw_args args; struct m_tag *dn_tag; int ipfw = 0; int divert; #ifdef IPFIREWALL_FORWARD struct m_tag *fwd_tag; #endif KASSERT(dir == PFIL_IN, ("ipfw_check_in wrong direction!")); if (!fw_enable) goto pass; bzero(&args, sizeof(args)); dn_tag = m_tag_find(*m0, PACKET_TAG_DUMMYNET, NULL); if (dn_tag != NULL){ struct dn_pkt_tag *dt; dt = (struct dn_pkt_tag *)(dn_tag+1); args.rule = dt->rule; m_tag_delete(*m0, dn_tag); } again: args.m = *m0; args.inp = inp; ipfw = ipfw_chk(&args); *m0 = args.m; if ((ipfw & IP_FW_PORT_DENY_FLAG) || *m0 == NULL) goto drop; if (ipfw == 0 && args.next_hop == NULL) goto pass; if (DUMMYNET_LOADED && (ipfw & IP_FW_PORT_DYNT_FLAG) != 0) { ip_dn_io_ptr(*m0, ipfw & 0xffff, DN_TO_IP_IN, &args); *m0 = NULL; return 0; /* packet consumed */ } if (ipfw != 0 && (ipfw & IP_FW_PORT_DYNT_FLAG) == 0) { if ((ipfw & IP_FW_PORT_TEE_FLAG) != 0) divert = ipfw_divert(m0, DIV_DIR_IN, 1); else divert = ipfw_divert(m0, DIV_DIR_IN, 0); /* tee should continue again with the firewall. */ if (divert) { *m0 = NULL; return 0; /* packet consumed */ } else goto again; /* continue with packet */ } #ifdef IPFIREWALL_FORWARD if (ipfw == 0 && args.next_hop != NULL) { fwd_tag = m_tag_get(PACKET_TAG_IPFORWARD, sizeof(struct sockaddr_in), M_NOWAIT); if (fwd_tag == NULL) goto drop; bcopy(args.next_hop, (fwd_tag+1), sizeof(struct sockaddr_in)); m_tag_prepend(*m0, fwd_tag); if (in_localip(args.next_hop->sin_addr)) (*m0)->m_flags |= M_FASTFWD_OURS; goto pass; } #endif drop: if (*m0) m_freem(*m0); *m0 = NULL; return (EACCES); pass: return 0; /* not filtered */ }
/* * ste_start: [ifnet interface function] * * Start packet transmission on the interface. */ static void ste_start(struct ifnet *ifp) { struct ste_softc *sc = ifp->if_softc; struct mbuf *m0, *m; struct ste_descsoft *ds; struct ste_tfd *tfd; bus_dmamap_t dmamap; int error, olasttx, nexttx, opending, seg, totlen; if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) return; /* * Remember the previous number of pending transmissions * and the current last descriptor in the list. */ opending = sc->sc_txpending; olasttx = sc->sc_txlast; /* * Loop through the send queue, setting up transmit descriptors * until we drain the queue, or use up all available transmit * descriptors. */ while (sc->sc_txpending < STE_NTXDESC) { /* * Grab a packet off the queue. */ IFQ_POLL(&ifp->if_snd, m0); if (m0 == NULL) break; m = NULL; /* * Get the last and next available transmit descriptor. */ nexttx = STE_NEXTTX(sc->sc_txlast); tfd = &sc->sc_txdescs[nexttx]; ds = &sc->sc_txsoft[nexttx]; dmamap = ds->ds_dmamap; /* * Load the DMA map. If this fails, the packet either * didn't fit in the alloted number of segments, or we * were short on resources. In this case, we'll copy * and try again. */ if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { printf("%s: unable to allocate Tx mbuf\n", device_xname(&sc->sc_dev)); break; } if (m0->m_pkthdr.len > MHLEN) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { printf("%s: unable to allocate Tx " "cluster\n", device_xname(&sc->sc_dev)); m_freem(m); break; } } m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); if (error) { printf("%s: unable to load Tx buffer, " "error = %d\n", device_xname(&sc->sc_dev), error); break; } } IFQ_DEQUEUE(&ifp->if_snd, m0); if (m != NULL) { m_freem(m0); m0 = m; } /* * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ /* Sync the DMA map. */ bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); /* Initialize the fragment list. */ for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) { tfd->tfd_frags[seg].frag_addr = htole32(dmamap->dm_segs[seg].ds_addr); tfd->tfd_frags[seg].frag_len = htole32(dmamap->dm_segs[seg].ds_len); totlen += dmamap->dm_segs[seg].ds_len; } tfd->tfd_frags[seg - 1].frag_len |= htole32(FRAG_LAST); /* Initialize the descriptor. */ tfd->tfd_next = htole32(STE_CDTXADDR(sc, nexttx)); tfd->tfd_control = htole32(TFD_FrameId(nexttx) | (totlen & 3)); /* Sync the descriptor. */ STE_CDTXSYNC(sc, nexttx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); /* * Store a pointer to the packet so we can free it later, * and remember what txdirty will be once the packet is * done. */ ds->ds_mbuf = m0; /* Advance the tx pointer. */ sc->sc_txpending++; sc->sc_txlast = nexttx; #if NBPFILTER > 0 /* * Pass the packet to any BPF listeners. */ if (ifp->if_bpf) bpf_mtap(ifp->if_bpf, m0); #endif /* NBPFILTER > 0 */ }
/* * Make space for a new header of length hlen at skip bytes * into the packet. When doing this we allocate new mbufs only * when absolutely necessary. The mbuf where the new header * is to go is returned together with an offset into the mbuf. * If NULL is returned then the mbuf chain may have been modified; * the caller is assumed to always free the chain. */ struct mbuf * m_makespace(struct mbuf *m0, int skip, int hlen, int *off) { struct mbuf *m; unsigned remain; IPSEC_ASSERT(m0 != NULL, ("null mbuf")); IPSEC_ASSERT(hlen < MHLEN, ("hlen too big: %u", hlen)); for (m = m0; m && skip > m->m_len; m = m->m_next) skip -= m->m_len; if (m == NULL) return (NULL); /* * At this point skip is the offset into the mbuf m * where the new header should be placed. Figure out * if there's space to insert the new header. If so, * and copying the remainder makes sense then do so. * Otherwise insert a new mbuf in the chain, splitting * the contents of m as needed. */ remain = m->m_len - skip; /* data to move */ if (hlen > M_TRAILINGSPACE(m)) { struct mbuf *n0, *n, **np; int todo, len, done, alloc; n0 = NULL; np = &n0; alloc = 0; done = 0; todo = remain; while (todo > 0) { if (todo > MHLEN) { n = m_getcl(M_NOWAIT, m->m_type, 0); len = MCLBYTES; } else { n = m_get(M_NOWAIT, m->m_type); len = MHLEN; } if (n == NULL) { m_freem(n0); return NULL; } *np = n; np = &n->m_next; alloc++; len = min(todo, len); memcpy(n->m_data, mtod(m, char *) + skip + done, len); n->m_len = len; done += len; todo -= len; } if (hlen <= M_TRAILINGSPACE(m) + remain) { m->m_len = skip + hlen; *off = skip; if (n0 != NULL) { *np = m->m_next; m->m_next = n0; } } else { n = m_get(M_NOWAIT, m->m_type); if (n == NULL) { m_freem(n0); return NULL; } alloc++; if ((n->m_next = n0) == NULL) np = &n->m_next; n0 = n; *np = m->m_next; m->m_next = n0; n->m_len = hlen; m->m_len = skip; m = n; /* header is at front ... */ *off = 0; /* ... of new mbuf */ } IPSECSTAT_INC(ips_mbinserted); } else {
static void ofnet_read(struct ofnet_softc *of) { struct ifnet *ifp = &of->sc_ethercom.ec_if; struct mbuf *m, **mp, *head; int s, l, len; char *bufp; s = splnet(); #if NIPKDB_OFN > 0 ipkdbrint(kifp, ifp); #endif for (;;) { len = OF_read(of->sc_ihandle, buf, sizeof buf); if (len == -2 || len == 0) break; if (len < sizeof(struct ether_header)) { ifp->if_ierrors++; continue; } bufp = buf; /* * We don't know if the interface included the FCS * or not. For now, assume that it did if we got * a packet length that looks like it could include * the FCS. * * XXX Yuck. */ if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) len = ETHER_MAX_LEN - ETHER_CRC_LEN; /* Allocate a header mbuf */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == 0) { ifp->if_ierrors++; continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; l = MHLEN; head = 0; mp = &head; while (len > 0) { if (head) { MGET(m, M_DONTWAIT, MT_DATA); if (m == 0) { ifp->if_ierrors++; m_freem(head); head = 0; break; } l = MLEN; } if (len >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { ifp->if_ierrors++; m_free(m); m_freem(head); head = 0; break; } l = MCLBYTES; } /* * Make sure the data after the Ethernet header * is aligned. * * XXX Assumes the device is an ethernet, but * XXX then so does other code in this driver. */ if (head == NULL) { char *newdata = (char *)ALIGN(m->m_data + sizeof(struct ether_header)) - sizeof(struct ether_header); l -= newdata - m->m_data; m->m_data = newdata; } m->m_len = l = min(len, l); bcopy(bufp, mtod(m, char *), l); bufp += l; len -= l; *mp = m; mp = &m->m_next; } if (head == 0) continue; #if NBPFILTER > 0 if (ifp->if_bpf) bpf_mtap(ifp->if_bpf, m); #endif ifp->if_ipackets++; (*ifp->if_input)(ifp, head); } splx(s); }
/* * Input an Neighbor Solicitation Message. * * Based on RFC 2461 * Based on RFC 2462 (duplicated address detection) */ void nd6_ns_input(struct mbuf *m, int off, int icmp6len) { struct ifnet *ifp = m->m_pkthdr.rcvif; struct ifnet *cmpifp; struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); struct nd_neighbor_solicit *nd_ns; struct in6_addr saddr6 = ip6->ip6_src; struct in6_addr daddr6 = ip6->ip6_dst; struct in6_addr taddr6; struct in6_addr myaddr6; char *lladdr = NULL; struct ifaddr *ifa = NULL; int lladdrlen = 0; int anycast = 0, proxy = 0, tentative = 0; int tlladdr; union nd_opts ndopts; struct sockaddr_dl *proxydl = NULL; /* * Collapse interfaces to the bridge for comparison and * mac (llinfo) purposes. */ cmpifp = ifp; if (ifp->if_bridge) cmpifp = ifp->if_bridge; #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, off, icmp6len,); nd_ns = (struct nd_neighbor_solicit *)((caddr_t)ip6 + off); #else IP6_EXTHDR_GET(nd_ns, struct nd_neighbor_solicit *, m, off, icmp6len); if (nd_ns == NULL) { icmp6stat.icp6s_tooshort++; return; } #endif ip6 = mtod(m, struct ip6_hdr *); /* adjust pointer for safety */ taddr6 = nd_ns->nd_ns_target; if (ip6->ip6_hlim != 255) { nd6log((LOG_ERR, "nd6_ns_input: invalid hlim (%d) from %s to %s on %s\n", ip6->ip6_hlim, ip6_sprintf(&ip6->ip6_src), ip6_sprintf(&ip6->ip6_dst), if_name(ifp))); goto bad; } if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) { /* dst has to be solicited node multicast address. */ if (daddr6.s6_addr16[0] == IPV6_ADDR_INT16_MLL && /* don't check ifindex portion */ daddr6.s6_addr32[1] == 0 && daddr6.s6_addr32[2] == IPV6_ADDR_INT32_ONE && daddr6.s6_addr8[12] == 0xff) { ; /* good */ } else { nd6log((LOG_INFO, "nd6_ns_input: bad DAD packet " "(wrong ip6 dst)\n")); goto bad; } } else if (!nd6_onlink_ns_rfc4861) { /* * Make sure the source address is from a neighbor's address. * * XXX probably only need to check cmpifp. */ if (in6ifa_ifplocaladdr(cmpifp, &saddr6) == NULL && in6ifa_ifplocaladdr(ifp, &saddr6) == NULL) { nd6log((LOG_INFO, "nd6_ns_input: " "NS packet from non-neighbor\n")); goto bad; } } if (IN6_IS_ADDR_MULTICAST(&taddr6)) { nd6log((LOG_INFO, "nd6_ns_input: bad NS target (multicast)\n")); goto bad; } if (IN6_IS_SCOPE_LINKLOCAL(&taddr6)) taddr6.s6_addr16[1] = htons(ifp->if_index); icmp6len -= sizeof(*nd_ns); nd6_option_init(nd_ns + 1, icmp6len, &ndopts); if (nd6_options(&ndopts) < 0) { nd6log((LOG_INFO, "nd6_ns_input: invalid ND option, ignored\n")); /* nd6_options have incremented stats */ goto freeit; } if (ndopts.nd_opts_src_lladdr) { lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1); lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3; } if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src) && lladdr) { nd6log((LOG_INFO, "nd6_ns_input: bad DAD packet " "(link-layer address option)\n")); goto bad; } /* * Attaching target link-layer address to the NA? * (RFC 2461 7.2.4) * * NS IP dst is unicast/anycast MUST NOT add * NS IP dst is solicited-node multicast MUST add * * In implementation, we add target link-layer address by default. * We do not add one in MUST NOT cases. */ #if 0 /* too much! */ ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &daddr6); if (ifa && (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST)) tlladdr = 0; else #endif if (!IN6_IS_ADDR_MULTICAST(&daddr6)) tlladdr = 0; else tlladdr = 1; /* * Target address (taddr6) must be either: * (1) Valid unicast/anycast address for my receiving interface. * (2) Unicast or anycast address for which I'm offering proxy * service. * (3) "tentative" address on which DAD is being performed. 
*/ /* (1) and (3) check. */ #ifdef CARP if (ifp->if_carp) ifa = carp_iamatch6(ifp->if_carp, &taddr6); if (!ifa) ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &taddr6); #else ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &taddr6); #endif /* * (2) Check proxying. Requires ip6_forwarding to be turned on. * * If the packet is anycast the target route must be on a * different interface because the anycast will get anything * on the current interface. * * If the packet is unicast the target route may be on the * same interface. If the gateway is a (typically manually * configured) link address we can directly offer it. * XXX for now we don't do this but instead offer ours and * presumably relay. * * WARNING! Since this is a subnet proxy the interface proxying * the ND6 must be in promiscuous mode or it will not see the * solicited multicast requests for various hosts being proxied. * * WARNING! Since this is a subnet proxy we have to treat bridge * interfaces as being the bridge itself so we do not proxy-nd6 * between bridge interfaces (which are effectively switched). * * (In the specific-host-proxy case via RTF_ANNOUNCE, which is * a bitch to configure, a specific multicast route is already * added for that host <-- NOT RECOMMENDED). */ if (!ifa && ip6_forwarding) { struct rtentry *rt; struct sockaddr_in6 tsin6; struct ifnet *rtifp; bzero(&tsin6, sizeof tsin6); tsin6.sin6_len = sizeof(struct sockaddr_in6); tsin6.sin6_family = AF_INET6; tsin6.sin6_addr = taddr6; rt = rtpurelookup((struct sockaddr *)&tsin6); rtifp = rt ? rt->rt_ifp : NULL; if (rtifp && rtifp->if_bridge) rtifp = rtifp->if_bridge; if (rt != NULL && (cmpifp != rtifp || (cmpifp == rtifp && (m->m_flags & M_MCAST) == 0)) ) { ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(cmpifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); nd6log((LOG_INFO, "nd6_ns_input: nd6 proxy %s(%s)<-%s ifa %p\n", if_name(cmpifp), if_name(ifp), if_name(rtifp), ifa)); if (ifa) { proxy = 1; /* * Manual link address on same interface * w/announce flag will proxy-arp using * target mac, else our mac is used. */ if (cmpifp == rtifp && (rt->rt_flags & RTF_ANNOUNCE) && rt->rt_gateway->sa_family == AF_LINK) { proxydl = SDL(rt->rt_gateway); } } } if (rt != NULL) --rt->rt_refcnt; } if (ifa == NULL) { /* * We've got an NS packet, and we don't have that adddress * assigned for us. We MUST silently ignore it. * See RFC2461 7.2.3. */ goto freeit; } myaddr6 = *IFA_IN6(ifa); anycast = ((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST; tentative = ((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_TENTATIVE; if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DUPLICATED) goto freeit; if (lladdr && ((cmpifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) { nd6log((LOG_INFO, "nd6_ns_input: lladdrlen mismatch for %s " "(if %d, NS packet %d)\n", ip6_sprintf(&taddr6), cmpifp->if_addrlen, lladdrlen - 2)); goto bad; } if (IN6_ARE_ADDR_EQUAL(&myaddr6, &saddr6)) { nd6log((LOG_INFO, "nd6_ns_input: duplicate IP6 address %s\n", ip6_sprintf(&saddr6))); goto freeit; } /* * We have neighbor solicitation packet, with target address equals to * one of my tentative address. * * src addr how to process? * --- --- * multicast of course, invalid (rejected in ip6_input) * unicast somebody is doing address resolution -> ignore * unspec dup address detection * * The processing is defined in RFC 2462. */ if (tentative) { /* * If source address is unspecified address, it is for * duplicated address detection. * * If not, the packet is for addess resolution; * silently ignore it. 
*/ if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) nd6_dad_ns_input(ifa); goto freeit; } /* * If the source address is unspecified address, entries must not * be created or updated. * It looks that sender is performing DAD. Output NA toward * all-node multicast address, to tell the sender that I'm using * the address. * S bit ("solicited") must be zero. */ if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) { saddr6 = kin6addr_linklocal_allnodes; saddr6.s6_addr16[1] = htons(cmpifp->if_index); nd6_na_output(cmpifp, &saddr6, &taddr6, ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) | (ip6_forwarding ? ND_NA_FLAG_ROUTER : 0), tlladdr, (struct sockaddr *)proxydl); goto freeit; } nd6_cache_lladdr(cmpifp, &saddr6, lladdr, lladdrlen, ND_NEIGHBOR_SOLICIT, 0); nd6_na_output(ifp, &saddr6, &taddr6, ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) | (ip6_forwarding ? ND_NA_FLAG_ROUTER : 0) | ND_NA_FLAG_SOLICITED, tlladdr, (struct sockaddr *)proxydl); freeit: m_freem(m); return; bad: nd6log((LOG_ERR, "nd6_ns_input: src=%s\n", ip6_sprintf(&saddr6))); nd6log((LOG_ERR, "nd6_ns_input: dst=%s\n", ip6_sprintf(&daddr6))); nd6log((LOG_ERR, "nd6_ns_input: tgt=%s\n", ip6_sprintf(&taddr6))); icmp6stat.icp6s_badns++; m_freem(m); }
void octeon_eth_start(struct ifnet *ifp) { struct octeon_eth_softc *sc = ifp->if_softc; struct mbuf *m; /* * performance tuning * presend iobdma request */ octeon_eth_send_queue_flush_prefetch(sc); if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) goto last; /* XXX assume that OCTEON doesn't buffer packets */ if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) { /* dequeue and drop them */ while (1) { IFQ_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; #if 0 #ifdef DDB m_print(m, "cd", printf); #endif printf("%s: drop\n", sc->sc_dev.dv_xname); #endif m_freem(m); IF_DROP(&ifp->if_snd); } goto last; } for (;;) { IFQ_POLL(&ifp->if_snd, m); if (__predict_false(m == NULL)) break; octeon_eth_send_queue_flush_fetch(sc); /* XXX */ /* * XXXSEIL * If no free send buffer is available, free all the sent buffer * and bail out. */ if (octeon_eth_send_queue_is_full(sc)) { return; } /* XXX */ IFQ_DEQUEUE(&ifp->if_snd, m); OCTEON_ETH_TAP(ifp, m, BPF_DIRECTION_OUT); /* XXX */ if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) octeon_eth_send_queue_flush(sc); if (octeon_eth_send(sc, m)) { ifp->if_oerrors++; m_freem(m); log(LOG_WARNING, "%s: failed to transmit packet\n", sc->sc_dev.dv_xname); } else { sc->sc_soft_req_cnt++; } if (sc->sc_flush) octeon_eth_send_queue_flush_sync(sc); /* XXX */ /* * send next iobdma request */ octeon_eth_send_queue_flush_prefetch(sc); } /* * XXXSEIL * Don't schedule send-buffer-free callout every time - those buffers are freed * by "free tick". This makes some packets like NFS slower, but it normally * doesn't happen on SEIL. */ #ifdef OCTEON_ETH_USENFS if (__predict_false(sc->sc_ext_callback_cnt > 0)) { int timo; /* ??? */ timo = hz - (100 * sc->sc_ext_callback_cnt); if (timo < 10) timo = 10; callout_schedule(&sc->sc_tick_free_ch, timo); } #endif last: octeon_eth_send_queue_flush_fetch(sc); }
/* * icoutput() */ static int icoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct route *ro) { struct ic_softc *sc = ifp->if_softc; device_t icdev = sc->ic_dev; device_t parent = device_get_parent(icdev); int len, sent; struct mbuf *mm; u_char *cp; u_int32_t hdr; /* BPF writes need to be handled specially. */ if (dst->sa_family == AF_UNSPEC) bcopy(dst->sa_data, &hdr, sizeof(hdr)); else hdr = dst->sa_family; mtx_lock(&sc->ic_lock); ifp->if_drv_flags |= IFF_DRV_RUNNING; /* already sending? */ if (sc->ic_flags & IC_SENDING) { ifp->if_oerrors++; goto error; } /* insert header */ bcopy ((char *)&hdr, sc->ic_obuf, ICHDRLEN); cp = sc->ic_obuf + ICHDRLEN; len = 0; mm = m; do { if (len + mm->m_len > sc->ic_ifp->if_mtu) { /* packet too large */ ifp->if_oerrors++; goto error; } bcopy(mtod(mm,char *), cp, mm->m_len); cp += mm->m_len; len += mm->m_len; } while ((mm = mm->m_next)); BPF_MTAP2(ifp, &hdr, sizeof(hdr), m); sc->ic_flags |= (IC_SENDING | IC_OBUF_BUSY); m_freem(m); mtx_unlock(&sc->ic_lock); /* send the packet */ if (iicbus_block_write(parent, sc->ic_addr, sc->ic_obuf, len + ICHDRLEN, &sent)) ifp->if_oerrors++; else { ifp->if_opackets++; ifp->if_obytes += len; } mtx_lock(&sc->ic_lock); sc->ic_flags &= ~(IC_SENDING | IC_OBUF_BUSY); if ((sc->ic_flags & (IC_BUFFERS_BUSY | IC_BUFFER_WAITER)) == IC_BUFFER_WAITER) wakeup(&sc); mtx_unlock(&sc->ic_lock); return (0); error: m_freem(m); mtx_unlock(&sc->ic_lock); return(0); }
struct mbuf * m_copym0(struct mbuf *m, int off, int len, int wait, int deep) { struct mbuf *n, **np; struct mbuf *top; int copyhdr = 0; if (off < 0 || len < 0) panic("m_copym0: off %d, len %d", off, len); if (off == 0 && m->m_flags & M_PKTHDR) copyhdr = 1; while (off > 0) { if (m == NULL) panic("m_copym0: null mbuf"); if (off < m->m_len) break; off -= m->m_len; m = m->m_next; } np = ⊤ top = NULL; while (len > 0) { if (m == NULL) { if (len != M_COPYALL) panic("m_copym0: m == NULL and not COPYALL"); break; } MGET(n, wait, m->m_type); *np = n; if (n == NULL) goto nospace; if (copyhdr) { M_DUP_PKTHDR(n, m); if (len != M_COPYALL) n->m_pkthdr.len = len; copyhdr = 0; } n->m_len = min(len, m->m_len - off); if (m->m_flags & M_EXT) { if (!deep) { n->m_data = m->m_data + off; n->m_ext = m->m_ext; MCLADDREFERENCE(m, n); } else { /* * we are unsure about the way m was allocated. * copy into multiple MCLBYTES cluster mbufs. */ MCLGET(n, wait); n->m_len = 0; n->m_len = M_TRAILINGSPACE(n); n->m_len = min(n->m_len, len); n->m_len = min(n->m_len, m->m_len - off); memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, (unsigned)n->m_len); } } else memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, (unsigned)n->m_len); if (len != M_COPYALL) len -= n->m_len; off += n->m_len; #ifdef DIAGNOSTIC if (off > m->m_len) panic("m_copym0 overrun"); #endif if (off == m->m_len) { m = m->m_next; off = 0; } np = &n->m_next; } if (top == NULL) MCFail++; return (top); nospace: m_freem(top); MCFail++; return (NULL); }
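/*
 * Illustrative sketch (not kernel code): the deep/shallow split in m_copym0()
 * above is the choice between sharing the external cluster (take another
 * reference and point into the same storage, as MCLADDREFERENCE() does) and
 * copying the bytes into freshly allocated storage.  A much-simplified model
 * with hypothetical types:
 */
#include <stdlib.h>
#include <string.h>

struct ext_buf {			/* hypothetical stand-in for cluster storage */
	int	refs;
	char	data[2048];
};

struct pkt {				/* hypothetical stand-in for an mbuf */
	struct ext_buf	*ext;
	char		*data;		/* points into ext->data */
	int		len;
};

/* Shallow copy: share the cluster, as in the "!deep" branch above. */
static void
copy_shallow(struct pkt *dst, struct pkt *src, int off, int len)
{
	dst->ext = src->ext;
	dst->ext->refs++;		/* reference-count the shared storage */
	dst->data = src->data + off;
	dst->len = len;
}

/* Deep copy: private storage, as in the "deep" branch above. */
static int
copy_deep(struct pkt *dst, struct pkt *src, int off, int len)
{
	dst->ext = malloc(sizeof(*dst->ext));
	if (dst->ext == NULL)
		return -1;
	dst->ext->refs = 1;
	memcpy(dst->ext->data, src->data + off, len);
	dst->data = dst->ext->data;
	dst->len = len;
	return 0;
}

int
main(void)
{
	struct ext_buf buf = { .refs = 1, .data = "example payload" };
	struct pkt src = { .ext = &buf, .data = buf.data, .len = 15 };
	struct pkt sh, dp;

	copy_shallow(&sh, &src, 8, 7);		/* shares buf, refs becomes 2 */
	if (copy_deep(&dp, &src, 8, 7) == 0)	/* private 7-byte copy */
		free(dp.ext);
	return 0;
}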
/* * Add an mfc entry */ static int add_m6fc(struct mf6cctl *mfccp) { struct mf6c *rt; u_long hash; struct rtdetq *rte; u_short nstl; int s; MF6CFIND(mfccp->mf6cc_origin.sin6_addr, mfccp->mf6cc_mcastgrp.sin6_addr, rt); /* If an entry already exists, just update the fields */ if (rt) { #ifdef MRT6DEBUG if (mrt6debug & DEBUG_MFC) log(LOG_DEBUG,"add_m6fc update o %s g %s p %x\n", ip6_sprintf(&mfccp->mf6cc_origin.sin6_addr), ip6_sprintf(&mfccp->mf6cc_mcastgrp.sin6_addr), mfccp->mf6cc_parent); #endif s = splsoftnet(); rt->mf6c_parent = mfccp->mf6cc_parent; rt->mf6c_ifset = mfccp->mf6cc_ifset; splx(s); return 0; } /* * Find the entry for which the upcall was made and update */ s = splsoftnet(); hash = MF6CHASH(mfccp->mf6cc_origin.sin6_addr, mfccp->mf6cc_mcastgrp.sin6_addr); for (rt = mf6ctable[hash], nstl = 0; rt; rt = rt->mf6c_next) { if (IN6_ARE_ADDR_EQUAL(&rt->mf6c_origin.sin6_addr, &mfccp->mf6cc_origin.sin6_addr) && IN6_ARE_ADDR_EQUAL(&rt->mf6c_mcastgrp.sin6_addr, &mfccp->mf6cc_mcastgrp.sin6_addr) && (rt->mf6c_stall != NULL)) { if (nstl++) log(LOG_ERR, "add_m6fc: %s o %s g %s p %x dbx %p\n", "multiple kernel entries", ip6_sprintf(&mfccp->mf6cc_origin.sin6_addr), ip6_sprintf(&mfccp->mf6cc_mcastgrp.sin6_addr), mfccp->mf6cc_parent, rt->mf6c_stall); #ifdef MRT6DEBUG if (mrt6debug & DEBUG_MFC) log(LOG_DEBUG, "add_m6fc o %s g %s p %x dbg %p\n", ip6_sprintf(&mfccp->mf6cc_origin.sin6_addr), ip6_sprintf(&mfccp->mf6cc_mcastgrp.sin6_addr), mfccp->mf6cc_parent, rt->mf6c_stall); #endif rt->mf6c_origin = mfccp->mf6cc_origin; rt->mf6c_mcastgrp = mfccp->mf6cc_mcastgrp; rt->mf6c_parent = mfccp->mf6cc_parent; rt->mf6c_ifset = mfccp->mf6cc_ifset; /* initialize pkt counters per src-grp */ rt->mf6c_pkt_cnt = 0; rt->mf6c_byte_cnt = 0; rt->mf6c_wrong_if = 0; rt->mf6c_expire = 0; /* Don't clean this guy up */ n6expire[hash]--; /* free packets Qed at the end of this entry */ for (rte = rt->mf6c_stall; rte != NULL; ) { struct rtdetq *n = rte->next; if (rte->ifp) { ip6_mdq(rte->m, rte->ifp, rt); } m_freem(rte->m); #ifdef UPCALL_TIMING collate(&(rte->t)); #endif /* UPCALL_TIMING */ free(rte, M_MRTABLE); rte = n; } rt->mf6c_stall = NULL; } } /* * It is possible that an entry is being inserted without an upcall */ if (nstl == 0) { #ifdef MRT6DEBUG if (mrt6debug & DEBUG_MFC) log(LOG_DEBUG, "add_mfc no upcall h %ld o %s g %s p %x\n", hash, ip6_sprintf(&mfccp->mf6cc_origin.sin6_addr), ip6_sprintf(&mfccp->mf6cc_mcastgrp.sin6_addr), mfccp->mf6cc_parent); #endif for (rt = mf6ctable[hash]; rt; rt = rt->mf6c_next) { if (IN6_ARE_ADDR_EQUAL(&rt->mf6c_origin.sin6_addr, &mfccp->mf6cc_origin.sin6_addr)&& IN6_ARE_ADDR_EQUAL(&rt->mf6c_mcastgrp.sin6_addr, &mfccp->mf6cc_mcastgrp.sin6_addr)) { rt->mf6c_origin = mfccp->mf6cc_origin; rt->mf6c_mcastgrp = mfccp->mf6cc_mcastgrp; rt->mf6c_parent = mfccp->mf6cc_parent; rt->mf6c_ifset = mfccp->mf6cc_ifset; /* initialize pkt counters per src-grp */ rt->mf6c_pkt_cnt = 0; rt->mf6c_byte_cnt = 0; rt->mf6c_wrong_if = 0; if (rt->mf6c_expire) n6expire[hash]--; rt->mf6c_expire = 0; } } if (rt == NULL) { /* no upcall, so make a new entry */ rt = (struct mf6c *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT); if (rt == NULL) { splx(s); return ENOBUFS; } /* insert new entry at head of hash chain */ rt->mf6c_origin = mfccp->mf6cc_origin; rt->mf6c_mcastgrp = mfccp->mf6cc_mcastgrp; rt->mf6c_parent = mfccp->mf6cc_parent; rt->mf6c_ifset = mfccp->mf6cc_ifset; /* initialize pkt counters per src-grp */ rt->mf6c_pkt_cnt = 0; rt->mf6c_byte_cnt = 0; rt->mf6c_wrong_if = 0; rt->mf6c_expire = 0; rt->mf6c_stall = NULL; /* link 
into table */ rt->mf6c_next = mf6ctable[hash]; mf6ctable[hash] = rt; } } splx(s); return 0; }
/* * Routine to copy from device local memory into mbufs. */ struct mbuf * m_devget(char *buf, int totlen, int off, struct ifnet *ifp, void (*copy)(const void *, void *, size_t)) { struct mbuf *m; struct mbuf *top = NULL, **mp = ⊤ int len; char *cp; char *epkt; cp = buf; epkt = cp + totlen; if (off) { /* * If 'off' is non-zero, packet is trailer-encapsulated, * so we have to skip the type and length fields. */ cp += off + 2 * sizeof(u_int16_t); totlen -= 2 * sizeof(u_int16_t); } MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return (NULL); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = totlen; m->m_len = MHLEN; while (totlen > 0) { if (top != NULL) { MGET(m, M_DONTWAIT, MT_DATA); if (m == NULL) { m_freem(top); return (NULL); } m->m_len = MLEN; } len = min(totlen, epkt - cp); if (len >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if (m->m_flags & M_EXT) m->m_len = len = min(len, MCLBYTES); else len = m->m_len; } else { /* * Place initial small packet/header at end of mbuf. */ if (len < m->m_len) { if (top == NULL && len + max_linkhdr <= m->m_len) m->m_data += max_linkhdr; m->m_len = len; } else len = m->m_len; } if (copy) copy(cp, mtod(m, caddr_t), (size_t)len); else bcopy(cp, mtod(m, caddr_t), (size_t)len); cp += len; *mp = m; mp = &m->m_next; totlen -= len; if (cp == epkt) cp = buf; } return (top); }
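/*
 * Illustrative sketch (not kernel code): m_devget() above carves a device
 * buffer of totlen bytes into a chain of mbufs, using a cluster whenever at
 * least MINCLSIZE bytes remain and plain mbuf storage otherwise.  The
 * constants below are assumed, BSD-like values; only the carving arithmetic
 * is the point.
 */
#include <stdio.h>

#define SK_MHLEN	200		/* assumed data space in a header mbuf */
#define SK_MLEN		236		/* assumed data space in a plain mbuf */
#define SK_MCLBYTES	2048		/* assumed cluster size */
#define SK_MINCLSIZE	(SK_MHLEN + SK_MLEN)	/* assumed cluster threshold */

static int
mbufs_needed(int totlen)
{
	int nbufs = 0, first = 1;

	while (totlen > 0) {
		int space = first ? SK_MHLEN : SK_MLEN;

		if (totlen >= SK_MINCLSIZE)	/* big remainder: use a cluster */
			space = SK_MCLBYTES;
		totlen -= (totlen < space) ? totlen : space;
		nbufs++;
		first = 0;
	}
	return nbufs;
}

int
main(void)
{
	printf("1514-byte frame -> %d mbuf(s)\n", mbufs_needed(1514));
	printf(" 300-byte frame -> %d mbuf(s)\n", mbufs_needed(300));
	return 0;
}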
/* * ARCnet output routine. * Encapsulate a packet of type family for the local net. * Assumes that ifp is actually pointer to arccom structure. */ int arc_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { struct arc_header *ah; int error; u_int8_t atype, adst; int loop_copy = 0; int isphds; #if defined(INET) || defined(INET6) struct llentry *lle; #endif if (!((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))) return(ENETDOWN); /* m, m1 aren't initialized yet */ error = 0; switch (dst->sa_family) { #ifdef INET case AF_INET: /* * For now, use the simple IP addr -> ARCnet addr mapping */ if (m->m_flags & (M_BCAST|M_MCAST)) adst = arcbroadcastaddr; /* ARCnet broadcast address */ else if (ifp->if_flags & IFF_NOARP) adst = ntohl(SIN(dst)->sin_addr.s_addr) & 0xFF; else { error = arpresolve(ifp, ro ? ro->ro_rt : NULL, m, dst, &adst, &lle); if (error) return (error == EWOULDBLOCK ? 0 : error); } atype = (ifp->if_flags & IFF_LINK0) ? ARCTYPE_IP_OLD : ARCTYPE_IP; break; case AF_ARP: { struct arphdr *ah; ah = mtod(m, struct arphdr *); ah->ar_hrd = htons(ARPHRD_ARCNET); loop_copy = -1; /* if this is for us, don't do it */ switch(ntohs(ah->ar_op)) { case ARPOP_REVREQUEST: case ARPOP_REVREPLY: atype = ARCTYPE_REVARP; break; case ARPOP_REQUEST: case ARPOP_REPLY: default: atype = ARCTYPE_ARP; break; } if (m->m_flags & M_BCAST) bcopy(ifp->if_broadcastaddr, &adst, ARC_ADDR_LEN); else bcopy(ar_tha(ah), &adst, ARC_ADDR_LEN); } break; #endif #ifdef INET6 case AF_INET6: error = nd6_storelladdr(ifp, m, dst, (u_char *)&adst, &lle); if (error) return (error); atype = ARCTYPE_INET6; break; #endif #ifdef IPX case AF_IPX: adst = SIPX(dst)->sipx_addr.x_host.c_host[5]; atype = ARCTYPE_IPX; if (adst == 0xff) adst = arcbroadcastaddr; break; #endif case AF_UNSPEC: { const struct arc_header *ah; loop_copy = -1; ah = (const struct arc_header *)dst->sa_data; adst = ah->arc_dhost; atype = ah->arc_type; if (atype == ARCTYPE_ARP) { atype = (ifp->if_flags & IFF_LINK0) ? ARCTYPE_ARP_OLD: ARCTYPE_ARP; #ifdef ARCNET_ALLOW_BROKEN_ARP /* * XXX It's not clear per RFC826 if this is needed, but * "assigned numbers" say this is wrong. * However, e.g., AmiTCP 3.0Beta used it... we make this * switchable for emergency cases. Not perfect, but... */ if (ifp->if_flags & IFF_LINK2) mtod(m, struct arphdr *)->ar_pro = atype - 1; #endif } break; } default: if_printf(ifp, "can't handle af%d\n", dst->sa_family); senderr(EAFNOSUPPORT); } isphds = arc_isphds(atype); M_PREPEND(m, isphds ? ARC_HDRNEWLEN : ARC_HDRLEN, M_NOWAIT); if (m == 0) senderr(ENOBUFS); ah = mtod(m, struct arc_header *); ah->arc_type = atype; ah->arc_dhost = adst; ah->arc_shost = ARC_LLADDR(ifp); if (isphds) { ah->arc_flag = 0; ah->arc_seqid = 0; } if ((ifp->if_flags & IFF_SIMPLEX) && (loop_copy != -1)) { if ((m->m_flags & M_BCAST) || (loop_copy > 0)) { struct mbuf *n = m_copy(m, 0, (int)M_COPYALL); (void) if_simloop(ifp, n, dst->sa_family, ARC_HDRLEN); } else if (ah->arc_dhost == ah->arc_shost) { (void) if_simloop(ifp, m, dst->sa_family, ARC_HDRLEN); return (0); /* XXX */ } } BPF_MTAP(ifp, m); error = ifp->if_transmit(ifp, m); return (error); bad: if (m) m_freem(m); return (error); }
static int kttcp_soreceive(struct socket *so, unsigned long long slen, unsigned long long *done, struct lwp *l, int *flagsp) { struct mbuf *m, **mp; int flags, len, error, offset, moff, type; long long orig_resid, resid; const struct protosw *pr; struct mbuf *nextrecord; pr = so->so_proto; mp = NULL; type = 0; resid = orig_resid = slen; if (flagsp) flags = *flagsp &~ MSG_EOR; else flags = 0; if (flags & MSG_OOB) { m = m_get(M_WAIT, MT_DATA); solock(so); error = (*pr->pr_usrreqs->pr_recvoob)(so, m, flags & MSG_PEEK); sounlock(so); if (error) goto bad; do { resid -= min(resid, m->m_len); m = m_free(m); } while (resid && error == 0 && m); bad: if (m) m_freem(m); return (error); } if (mp) *mp = NULL; solock(so); restart: if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) return (error); m = so->so_rcv.sb_mb; /* * If we have less data than requested, block awaiting more * (subject to any timeout) if: * 1. the current count is less than the low water mark, * 2. MSG_WAITALL is set, and it is possible to do the entire * receive operation at once if we block (resid <= hiwat), or * 3. MSG_DONTWAIT is not set. * If MSG_WAITALL is set but resid is larger than the receive buffer, * we have to do the receive in sections, and thus risk returning * a short count if a timeout or signal occurs after we start. */ if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && so->so_rcv.sb_cc < resid) && (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) && m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { #ifdef DIAGNOSTIC if (m == NULL && so->so_rcv.sb_cc) panic("receive 1"); #endif if (so->so_error) { if (m) goto dontblock; error = so->so_error; if ((flags & MSG_PEEK) == 0) so->so_error = 0; goto release; } if (so->so_state & SS_CANTRCVMORE) { if (m) goto dontblock; else goto release; } for (; m; m = m->m_next) if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { m = so->so_rcv.sb_mb; goto dontblock; } if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && (so->so_proto->pr_flags & PR_CONNREQUIRED)) { error = ENOTCONN; goto release; } if (resid == 0) goto release; if ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO))) { error = EWOULDBLOCK; goto release; } sbunlock(&so->so_rcv); error = sbwait(&so->so_rcv); if (error) { sounlock(so); return (error); } goto restart; } dontblock: /* * On entry here, m points to the first record of the socket buffer. * While we process the initial mbufs containing address and control * info, we save a copy of m->m_nextpkt into nextrecord. */ #ifdef notyet /* XXXX */ if (uio->uio_lwp) uio->uio_lwp->l_ru.ru_msgrcv++; #endif KASSERT(m == so->so_rcv.sb_mb); SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1"); SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1"); nextrecord = m->m_nextpkt; if (pr->pr_flags & PR_ADDR) { #ifdef DIAGNOSTIC if (m->m_type != MT_SONAME) panic("receive 1a"); #endif orig_resid = 0; if (flags & MSG_PEEK) { m = m->m_next; } else { sbfree(&so->so_rcv, m); MFREE(m, so->so_rcv.sb_mb); m = so->so_rcv.sb_mb; } } while (m && m->m_type == MT_CONTROL && error == 0) { if (flags & MSG_PEEK) { m = m->m_next; } else { sbfree(&so->so_rcv, m); MFREE(m, so->so_rcv.sb_mb); m = so->so_rcv.sb_mb; } } /* * If m is non-NULL, we have some data to read. From now on, * make sure to keep sb_lastrecord consistent when working on * the last packet on the chain (nextrecord == NULL) and we * change m->m_nextpkt. 
*/ if (m) { if ((flags & MSG_PEEK) == 0) { m->m_nextpkt = nextrecord; /* * If nextrecord == NULL (this is a single chain), * then sb_lastrecord may not be valid here if m * was changed earlier. */ if (nextrecord == NULL) { KASSERT(so->so_rcv.sb_mb == m); so->so_rcv.sb_lastrecord = m; } } type = m->m_type; if (type == MT_OOBDATA) flags |= MSG_OOB; } else { if ((flags & MSG_PEEK) == 0) { KASSERT(so->so_rcv.sb_mb == m); so->so_rcv.sb_mb = nextrecord; SB_EMPTY_FIXUP(&so->so_rcv); } } SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2"); SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2"); moff = 0; offset = 0; while (m && resid > 0 && error == 0) { if (m->m_type == MT_OOBDATA) { if (type != MT_OOBDATA) break; } else if (type == MT_OOBDATA) break; #ifdef DIAGNOSTIC else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) panic("receive 3"); #endif so->so_state &= ~SS_RCVATMARK; len = resid; if (so->so_oobmark && len > so->so_oobmark - offset) len = so->so_oobmark - offset; if (len > m->m_len - moff) len = m->m_len - moff; /* * If mp is set, just pass back the mbufs. * Otherwise copy them out via the uio, then free. * Sockbuf must be consistent here (points to current mbuf, * it points to next record) when we drop priority; * we must note any additions to the sockbuf when we * block interrupts again. */ resid -= len; if (len == m->m_len - moff) { if (m->m_flags & M_EOR) flags |= MSG_EOR; if (flags & MSG_PEEK) { m = m->m_next; moff = 0; } else { nextrecord = m->m_nextpkt; sbfree(&so->so_rcv, m); if (mp) { *mp = m; mp = &m->m_next; so->so_rcv.sb_mb = m = m->m_next; *mp = NULL; } else { MFREE(m, so->so_rcv.sb_mb); m = so->so_rcv.sb_mb; } /* * If m != NULL, we also know that * so->so_rcv.sb_mb != NULL. */ KASSERT(so->so_rcv.sb_mb == m); if (m) { m->m_nextpkt = nextrecord; if (nextrecord == NULL) so->so_rcv.sb_lastrecord = m; } else { so->so_rcv.sb_mb = nextrecord; SB_EMPTY_FIXUP(&so->so_rcv); } SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 3"); SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 3"); } } else { if (flags & MSG_PEEK) moff += len; else { if (mp) { sounlock(so); *mp = m_copym(m, 0, len, M_WAIT); solock(so); } m->m_data += len; m->m_len -= len; so->so_rcv.sb_cc -= len; } } if (so->so_oobmark) { if ((flags & MSG_PEEK) == 0) { so->so_oobmark -= len; if (so->so_oobmark == 0) { so->so_state |= SS_RCVATMARK; break; } } else { offset += len; if (offset == so->so_oobmark) break; } } if (flags & MSG_EOR) break; /* * If the MSG_WAITALL flag is set (for non-atomic socket), * we must not quit until "uio->uio_resid == 0" or an error * termination. If a signal/timeout occurs, return * with a short count but without error. * Keep sockbuf locked against other readers. */ while (flags & MSG_WAITALL && m == NULL && resid > 0 && !sosendallatonce(so) && !nextrecord) { if (so->so_error || so->so_state & SS_CANTRCVMORE) break; /* * If we are peeking and the socket receive buffer is * full, stop since we can't get more data to peek at. */ if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0) break; /* * If we've drained the socket buffer, tell the * protocol in case it needs to do something to * get it filled again. 
*/ if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) { (*pr->pr_usrreqs->pr_rcvd)(so, flags, l); } SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive sbwait 2"); SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive sbwait 2"); error = sbwait(&so->so_rcv); if (error) { sbunlock(&so->so_rcv); sounlock(so); return (0); } if ((m = so->so_rcv.sb_mb) != NULL) nextrecord = m->m_nextpkt; } } if (m && pr->pr_flags & PR_ATOMIC) { flags |= MSG_TRUNC; if ((flags & MSG_PEEK) == 0) (void) sbdroprecord(&so->so_rcv); } if ((flags & MSG_PEEK) == 0) { if (m == NULL) { /* * First part is an SB_EMPTY_FIXUP(). Second part * makes sure sb_lastrecord is up-to-date if * there is still data in the socket buffer. */ so->so_rcv.sb_mb = nextrecord; if (so->so_rcv.sb_mb == NULL) { so->so_rcv.sb_mbtail = NULL; so->so_rcv.sb_lastrecord = NULL; } else if (nextrecord->m_nextpkt == NULL) so->so_rcv.sb_lastrecord = nextrecord; } SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4"); SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4"); if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) { (*pr->pr_usrreqs->pr_rcvd)(so, flags, l); } } if (orig_resid == resid && orig_resid && (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { sbunlock(&so->so_rcv); goto restart; } if (flagsp) *flagsp |= flags; release: sbunlock(&so->so_rcv); sounlock(so); *done = slen - resid; #if 0 printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid); #endif return (error); }