/*
 * esp4_input: IPv4 IPsec ESP input processing.
 *
 * Takes ownership of the packet in *mp (an IPv4 datagram whose ESP header
 * starts at byte offset *offp), looks up the Security Association by SPI,
 * optionally verifies the authenticator (ICV) and anti-replay sequence
 * number, decrypts the payload, strips the ESP trailer, and — in the
 * tunnel-mode branch visible here — requeues the decapsulated inner IPv4
 * packet onto the IP input queue.
 *
 * On every error path a statistics counter is bumped and control jumps to
 * the "bad" label.
 *
 * NOTE(review): this view is truncated — the transport-mode "else" branch,
 * the "bad" label, and the function epilogue lie past the visible text.
 */
int esp4_input(struct mbuf **mp, int *offp, int proto)
{
	int off;			/* byte offset of the ESP header */
	struct ip *ip;
	struct esp *esp;
	struct esptail esptail;		/* copy of trailing {padlen, next-hdr} */
	struct mbuf *m;
	u_int32_t spi;			/* SPI, kept in network byte order */
	struct secasvar *sav = NULL;	/* SA matched by (addrs, proto, spi) */
	size_t taillen;			/* pad bytes + trailer to strip */
	u_int16_t nxt;			/* inner protocol from the ESP trailer */
	const struct esp_algorithm *algo;
	int ivlen;			/* IV length for this SA, in bytes */
	size_t hlen;			/* outer IP header length, in bytes */
	size_t esplen;			/* fixed ESP header size (old vs. new) */

	off = *offp;
	m = *mp;
	*mp = NULL;	/* caller no longer owns the mbuf */

	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
		    "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		ipsecstat.in_inval++;
		goto bad;
	}

	/* make the largest possible fixed ESP header contiguous */
	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			ipsecstat.in_inval++;
			goto bad;
		}
	}

	ip = mtod(m, struct ip *);
	esp = (struct esp *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = esp->esp_spi;

	if ((sav = key_allocsa(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	    IPPROTO_ESP, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_nosa++;
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    kprintf("DP esp4_input called to allocate SA:%p\n", sav));
	/* only MATURE or DYING SAs may carry traffic */
	if (sav->state != SADB_SASTATE_MATURE
	 && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_badspi++;
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_badspi++;
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}

	/*
	 * Replay/ICV checking applies only to new-format SAs that carry
	 * both a replay window and authentication keying material.
	 */
	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
	 && (sav->alg_auth && sav->key_auth)))
		goto noreplaycheck;

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE)
		goto noreplaycheck;

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
		; /* okey */
	else {
		ipsecstat.in_espreplay++;
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
    {
	u_char sum0[AH_MAXSUMSIZE];	/* authenticator taken off the wire */
	u_char sum[AH_MAXSUMSIZE];	/* authenticator we recompute */
	const struct ah_algorithm *sumalgo;
	size_t siz;

	sumalgo = ah_algorithm_lookup(sav->alg_auth);
	if (!sumalgo)
		goto noreplaycheck;
	/* ICV length, rounded up to a 4-byte multiple */
	siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
	if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
		ipsecstat.in_inval++;
		goto bad;
	}
	if (AH_MAXSUMSIZE < siz) {
		ipseclog((LOG_DEBUG,
		    "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
		    (u_long)siz));
		ipsecstat.in_inval++;
		goto bad;
	}

	/* the wire authenticator is the last siz bytes of the packet */
	m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]);

	if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_espauthfail++;
		goto bad;
	}

	if (bcmp(sum0, sum, siz) != 0) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_espauthfail++;
		goto bad;
	}

	/* strip off the authentication data */
	/*
	 * NOTE(review): siz is size_t, so -siz wraps to a huge unsigned
	 * value before conversion at the m_adj call boundary — presumably
	 * m_adj takes an int and this relies on the usual truncation to a
	 * negative length; verify against the m_adj prototype.
	 */
	m_adj(m, -siz);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - siz;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
	m->m_flags |= M_AUTHIPDGM;	/* mark datagram as authenticated */
	ipsecstat.in_espauthsucc++;
    }

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
		if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
			ipsecstat.in_espreplay++;
			goto bad;
		}
	}

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}

	/* packet must hold header + IV + at least an empty trailer */
	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		ipsecstat.in_inval++;
		goto bad;
	}

	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, off + esplen + ivlen);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			ipsecstat.in_inval++;
			goto bad;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		ipsecstat.in_inval++;
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt)
		panic("internal error: no decrypt function");
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}
	ipsecstat.in_esphist[sav->alg_enc]++;

	m->m_flags |= M_DECRYPTED;

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	/* reject pad lengths that would eat into the header/IV */
	if (m->m_pkthdr.len < taillen
	 || m->m_pkthdr.len - taillen < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, -taillen);

#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
		/*
		 * strip off all the headers that precedes ESP header.
		 *	IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos;

		tos = ip->ip_tos;	/* outer TOS, for ECN propagation */
		m_adj(m, off + esplen + ivlen);
		if (m->m_len < sizeof(*ip)) {
			m = m_pullup(m, sizeof(*ip));
			if (!m) {
				ipsecstat.in_inval++;
				goto bad;
			}
		}
		ip = mtod(m, struct ip *);
		/* ECN consideration. */
		ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
		/* inner addresses must match what the tunnel SA allows */
		if (!key_checktunnelsanity(sav, AF_INET,
		    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
			ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
			    "in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi),
			    ipsec_logsastr(sav)));
			ipsecstat.in_inval++;
			goto bad;
		}

		key_sa_recordxfer(sav, m);	/* account bytes against SA lifetime */
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
			ipsecstat.in_nomem++;
			goto bad;
		}

		/* hand the inner packet back to the IP input path */
		if (netisr_queue(NETISR_IP, m)) {
			ipsecstat.in_inval++;
			m = NULL;
			goto bad;
		}
		nxt = IPPROTO_DONE;
	} else {
/*
 * ah4_input: IPv4 IPsec AH (Authentication Header) input processing.
 *
 * m is an IPv4 datagram whose AH header starts at byte offset off.
 * The visible portion looks up the SA by SPI, validates its state,
 * resolves the authentication algorithm, and sanity-checks the
 * advertised authenticator length against the algorithm's output size
 * (rounded up to a 4-byte multiple, per the long rationale comment on
 * non-96-bit truncation below).
 *
 * NOTE(review): this view is truncated — the "fail" label, the actual
 * ICV verification, and header stripping lie past the visible text
 * (stripsiz/cksum/ifamily/nxt are declared for that later code).
 */
void ah4_input(struct mbuf *m, int off)
{
	struct ip *ip;
	struct ah *ah;
	u_int32_t spi;			/* SPI, kept in network byte order */
	const struct ah_algorithm *algo;
	size_t siz;			/* algorithm's raw ICV size */
	size_t siz1;			/* siz rounded up to 4-byte multiple */
	u_char *cksum;
	struct secasvar *sav = NULL;	/* SA matched by (addrs, proto, spi) */
	u_int16_t nxt;			/* protocol after the AH header */
	size_t hlen;			/* IP header length, in bytes */
	size_t stripsiz = 0;
	sa_family_t ifamily;

	/* make the fixed new-format AH header contiguous */
	if (m->m_len < off + sizeof(struct newah)) {
		m = m_pullup(m, off + sizeof(struct newah));
		if (!m) {
			ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup;"
			    "dropping the packet for simplicity\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto fail;
		}
	}

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
	ah = (struct ah *)(void *)(((caddr_t)ip) + off);
	nxt = ah->ah_nxt;
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = ah->ah_spi;

	if ((sav = key_allocsa(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	    IPPROTO_AH, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 AH input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
		goto fail;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP ah4_input called to allocate SA:0x%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	/* only MATURE or DYING SAs may carry traffic */
	if (sav->state != SADB_SASTATE_MATURE
	 && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 AH input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto fail;
	}

	algo = ah_algorithm_lookup(sav->alg_auth);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 AH input: "
		    "unsupported authentication algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto fail;
	}

	siz = (*algo->sumsiz)(sav);
	siz1 = ((siz + 3) & ~(4 - 1));	/* pad ICV size to 32-bit boundary */

	/*
	 * sanity checks for header, 1.
	 */
    {
	int sizoff;

	/* new-format AH carries a 4-byte sequence number after the header */
	sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4;

	/*
	 * Here, we do not do "siz1 == siz".  This is because the way
	 * RFC240[34] section 2 is written.  They do not require truncation
	 * to 96 bits.
	 * For example, Microsoft IPsec stack attaches 160 bits of
	 * authentication data for both hmac-md5 and hmac-sha1.  For hmac-sha1,
	 * 32 bits of padding is attached.
	 *
	 * There are two downsides to this specification.
	 * They have no real harm, however, they leave us fuzzy feeling.
	 * - if we attach more than 96 bits of authentication data onto AH,
	 *   we will never notice about possible modification by rogue
	 *   intermediate nodes.
	 *   Since extra bits in AH checksum is never used, this constitutes
	 *   no real issue, however, it is wacky.
	 * - even if the peer attaches big authentication data, we will never
	 *   notice the difference, since longer authentication data will just
	 *   work.
	 *
	 * We may need some clarification in the spec.
	 */
	if (siz1 < siz) {
		ipseclog((LOG_NOTICE,
		    "sum length too short in IPv4 AH input "
		    "(%lu, should be at least %lu): %s\n",
		    (u_int32_t)siz1, (u_int32_t)siz,
		    ipsec4_logpacketstr(ip, spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto fail;
	}
	/* on-wire ah_len (in 32-bit words) must match the rounded ICV size */
	if ((ah->ah_len << 2) - sizoff != siz1) {
		ipseclog((LOG_NOTICE,
		    "sum length mismatch in IPv4 AH input "
		    "(%d should be %lu): %s\n",
		    (ah->ah_len << 2) - sizoff, (u_int32_t)siz1,
		    ipsec4_logpacketstr(ip, spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto fail;
	}

	/* pull up header + sequence number + authenticator */
	if (m->m_len < off + sizeof(struct ah) + sizoff + siz1) {
		m = m_pullup(m, off + sizeof(struct ah) + sizoff + siz1);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 AH input: can't pullup\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto fail;
		}
		/* Expect 32-bit aligned data ptr on strict-align platforms */
		MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

		/* m_pullup may have moved the data: re-derive pointers */
		ip = mtod(m, struct ip *);
		ah = (struct ah *)(void *)(((caddr_t)ip) + off);
	}
    }