static int esp_gcm_mature(struct secasvar *sav) { int keylen; const struct esp_algorithm *algo; if (sav->flags & SADB_X_EXT_OLD) { ipseclog((LOG_ERR, "esp_gcm_mature: algorithm incompatible with esp-old\n")); return 1; } if (sav->flags & SADB_X_EXT_DERIV) { ipseclog((LOG_ERR, "esp_gcm_mature: algorithm incompatible with derived\n")); return 1; } if (sav->flags & SADB_X_EXT_IIV) { ipseclog((LOG_ERR, "esp_gcm_mature: implicit IV not currently implemented\n")); return 1; } if (!sav->key_enc) { ipseclog((LOG_ERR, "esp_gcm_mature: no key is given.\n")); return 1; } algo = esp_algorithm_lookup(sav->alg_enc); if (!algo) { ipseclog((LOG_ERR, "esp_gcm_mature: unsupported algorithm.\n")); return 1; } keylen = sav->key_enc->sadb_key_bits; if (keylen < algo->keymin || algo->keymax < keylen) { ipseclog((LOG_ERR, "esp_gcm_mature %s: invalid key length %d.\n", algo->name, sav->key_enc->sadb_key_bits)); return 1; } switch (sav->alg_enc) { case SADB_X_EALG_AES_GCM: /* allows specific key sizes only */ if (!(keylen == ESP_AESGCM_KEYLEN128 || keylen == ESP_AESGCM_KEYLEN192 || keylen == ESP_AESGCM_KEYLEN256)) { ipseclog((LOG_ERR, "esp_gcm_mature %s: invalid key length %d.\n", algo->name, keylen)); return 1; } break; default: ipseclog((LOG_ERR, "esp_gcm_mature %s: invalid algo %d.\n", sav->alg_enc)); return 1; } return 0; }
/* * Modify the packet so that it includes the authentication data. * The mbuf passed must start with IPv4 header. * * assumes that the first mbuf contains IPv4 header + option only. * the function does not modify m. */ int ah4_output(struct mbuf *m, struct ipsecrequest *isr) { struct secasvar *sav = isr->sav; const struct ah_algorithm *algo; u_int32_t spi; u_char *ahdrpos; u_int8_t *ahsumpos = NULL; size_t hlen = 0; /* IP header+option in bytes */ size_t plen = 0; /* AH payload size in bytes */ size_t ahlen = 0; /* plen + sizeof(ah) */ struct ip *ip; struct in_addr dst; struct in_addr *finaldst; int error; dst.s_addr = 0; /* XXX: GCC */ /* sanity checks */ if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) { ip = mtod(m, struct ip *); ipseclog((LOG_DEBUG, "ah4_output: internal error: " "sav->replay is null: %x->%x, SPI=%u\n", (u_int32_t)ntohl(ip->ip_src.s_addr), (u_int32_t)ntohl(ip->ip_dst.s_addr), (u_int32_t)ntohl(sav->spi))); IPSEC_STATINC(IPSEC_STAT_OUT_INVAL); error = EINVAL; goto fail; }
static int esp_descbc_mature(struct secasvar *sav) { const struct esp_algorithm *algo; if (!(sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_IV4B)) { ipseclog((LOG_ERR, "esp_cbc_mature: " "algorithm incompatible with 4 octets IV length\n")); return 1; } if (!sav->key_enc) { ipseclog((LOG_ERR, "esp_descbc_mature: no key is given.\n")); return 1; } algo = esp_algorithm_lookup(sav->alg_enc); if (!algo) { ipseclog((LOG_ERR, "esp_descbc_mature: unsupported algorithm.\n")); return 1; } if (_KEYBITS(sav->key_enc) < algo->keymin || _KEYBITS(sav->key_enc) > algo->keymax) { ipseclog((LOG_ERR, "esp_descbc_mature: invalid key length %d.\n", _KEYBITS(sav->key_enc))); return 1; } /* weak key check */ if (des_is_weak_key((des_cblock *)_KEYBUF(sav->key_enc))) { ipseclog((LOG_ERR, "esp_descbc_mature: weak key was passed.\n")); return 1; } return 0; }
int esp_schedule(const struct esp_algorithm *algo, struct secasvar *sav) { int error; /* check for key length */ if (_KEYBITS(sav->key_enc) < algo->keymin || _KEYBITS(sav->key_enc) > algo->keymax) { ipseclog((LOG_ERR, "esp_schedule %s: unsupported key length %d: " "needs %d to %d bits\n", algo->name, _KEYBITS(sav->key_enc), algo->keymin, algo->keymax)); return EINVAL; } /* already allocated */ if (sav->sched && sav->schedlen != 0) return 0; /* no schedule necessary */ if (!algo->schedule || !algo->schedlen) return 0; sav->schedlen = (*algo->schedlen)(algo); sav->sched = kmalloc(sav->schedlen, M_SECA, M_NOWAIT); if (!sav->sched) { sav->schedlen = 0; return ENOBUFS; } error = (*algo->schedule)(algo, sav); if (error) { ipseclog((LOG_ERR, "esp_schedule %s: error %d\n", algo->name, error)); bzero(sav->sched, sav->schedlen); kfree(sav->sched, M_SECA); sav->sched = NULL; sav->schedlen = 0; } return error; }
/* * Return a held reference to the default SP. */ static struct secpolicy * key_allocsp_default(const char* where, int tag) { struct secpolicy *sp; KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP key_allocsp_default from %s:%u\n", where, tag)); sp = &V_ip4_def_policy; if (sp->policy != IPSEC_POLICY_DISCARD && sp->policy != IPSEC_POLICY_NONE) { ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n", sp->policy, IPSEC_POLICY_NONE)); sp->policy = IPSEC_POLICY_NONE; } key_addref(sp); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP key_allocsp_default returns SP:%p (%u)\n", sp, sp->refcnt)); return (sp); }
static int deflate_common(struct mbuf *m, struct mbuf *md, size_t *lenp, int mode) /* 0: compress 1: decompress */ { struct mbuf *mprev; struct mbuf *p; struct mbuf *n = NULL, *n0 = NULL, **np; z_stream zs; int error = 0; int zerror; size_t offset; #define MOREBLOCK() \ do { \ /* keep the reply buffer into our chain */ \ if (n) { \ n->m_len = zs.total_out - offset; \ offset = zs.total_out; \ *np = n; \ np = &n->m_next; \ n = NULL; \ } \ \ /* get a fresh reply buffer */ \ n = m_getcl(M_NOWAIT, MT_DATA, 0); \ if (!n) { \ error = ENOBUFS; \ goto fail; \ } \ n->m_len = 0; \ n->m_len = M_TRAILINGSPACE(n); \ n->m_next = NULL; \ /* \ * if this is the first reply buffer, reserve \ * region for ipcomp header. \ */ \ if (*np == NULL) { \ n->m_len -= sizeof(struct ipcomp); \ n->m_data += sizeof(struct ipcomp); \ } \ \ zs.next_out = mtod(n, u_int8_t *); \ zs.avail_out = n->m_len; \ } while (0) for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) ; if (!mprev) panic("md is not in m in deflate_common"); bzero(&zs, sizeof(zs)); zs.zalloc = deflate_alloc; zs.zfree = deflate_free; zerror = mode ? inflateInit2(&zs, deflate_window_in) : deflateInit2(&zs, deflate_policy, Z_DEFLATED, deflate_window_out, deflate_memlevel, Z_DEFAULT_STRATEGY); if (zerror != Z_OK) { error = ENOBUFS; goto fail; } n0 = n = NULL; np = &n0; offset = 0; zerror = 0; p = md; while (p && p->m_len == 0) { p = p->m_next; } /* input stream and output stream are available */ while (p && zs.avail_in == 0) { /* get input buffer */ if (p && zs.avail_in == 0) { zs.next_in = mtod(p, u_int8_t *); zs.avail_in = p->m_len; p = p->m_next; while (p && p->m_len == 0) { p = p->m_next; } } /* get output buffer */ if (zs.next_out == NULL || zs.avail_out == 0) { MOREBLOCK(); } zerror = mode ? inflate(&zs, Z_NO_FLUSH) : deflate(&zs, Z_NO_FLUSH); if (zerror == Z_STREAM_END) ; /* once more. 
*/ else if (zerror == Z_OK) { /* inflate: Z_OK can indicate the end of decode */ if (mode && !p && zs.avail_out != 0) goto terminate; else ; /* once more. */ } else { if (zs.msg) { ipseclog((LOG_ERR, "ipcomp_%scompress: " "%sflate(Z_NO_FLUSH): %s\n", mode ? "de" : "", mode ? "in" : "de", zs.msg)); } else { ipseclog((LOG_ERR, "ipcomp_%scompress: " "%sflate(Z_NO_FLUSH): unknown error (%d)\n", mode ? "de" : "", mode ? "in" : "de", zerror)); } mode ? inflateEnd(&zs) : deflateEnd(&zs); error = EINVAL; goto fail; } }
int esp4_input(struct mbuf **mp, int *offp, int proto) { int off; struct ip *ip; struct esp *esp; struct esptail esptail; struct mbuf *m; u_int32_t spi; struct secasvar *sav = NULL; size_t taillen; u_int16_t nxt; const struct esp_algorithm *algo; int ivlen; size_t hlen; size_t esplen; off = *offp; m = *mp; *mp = NULL; /* sanity check for alignment. */ if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) { ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem " "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len)); ipsecstat.in_inval++; goto bad; } if (m->m_len < off + ESPMAXLEN) { m = m_pullup(m, off + ESPMAXLEN); if (!m) { ipseclog((LOG_DEBUG, "IPv4 ESP input: can't pullup in esp4_input\n")); ipsecstat.in_inval++; goto bad; } } ip = mtod(m, struct ip *); esp = (struct esp *)(((u_int8_t *)ip) + off); #ifdef _IP_VHL hlen = IP_VHL_HL(ip->ip_vhl) << 2; #else hlen = ip->ip_hl << 2; #endif /* find the sassoc. */ spi = esp->esp_spi; if ((sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst, IPPROTO_ESP, spi)) == 0) { ipseclog((LOG_WARNING, "IPv4 ESP input: no key association found for spi %u\n", (u_int32_t)ntohl(spi))); ipsecstat.in_nosa++; goto bad; } KEYDEBUG(KEYDEBUG_IPSEC_STAMP, kprintf("DP esp4_input called to allocate SA:%p\n", sav)); if (sav->state != SADB_SASTATE_MATURE && sav->state != SADB_SASTATE_DYING) { ipseclog((LOG_DEBUG, "IPv4 ESP input: non-mature/dying SA found for spi %u\n", (u_int32_t)ntohl(spi))); ipsecstat.in_badspi++; goto bad; } algo = esp_algorithm_lookup(sav->alg_enc); if (!algo) { ipseclog((LOG_DEBUG, "IPv4 ESP input: " "unsupported encryption algorithm for spi %u\n", (u_int32_t)ntohl(spi))); ipsecstat.in_badspi++; goto bad; } /* check if we have proper ivlen information */ ivlen = sav->ivlen; if (ivlen < 0) { ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n", ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); ipsecstat.in_inval++; goto bad; } if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay && (sav->alg_auth && 
sav->key_auth))) goto noreplaycheck; if (sav->alg_auth == SADB_X_AALG_NULL || sav->alg_auth == SADB_AALG_NONE) goto noreplaycheck; /* * check for sequence number. */ if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) ; /* okey */ else { ipsecstat.in_espreplay++; ipseclog((LOG_WARNING, "replay packet in IPv4 ESP input: %s %s\n", ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); goto bad; } /* check ICV */ { u_char sum0[AH_MAXSUMSIZE]; u_char sum[AH_MAXSUMSIZE]; const struct ah_algorithm *sumalgo; size_t siz; sumalgo = ah_algorithm_lookup(sav->alg_auth); if (!sumalgo) goto noreplaycheck; siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1)); if (m->m_pkthdr.len < off + ESPMAXLEN + siz) { ipsecstat.in_inval++; goto bad; } if (AH_MAXSUMSIZE < siz) { ipseclog((LOG_DEBUG, "internal error: AH_MAXSUMSIZE must be larger than %lu\n", (u_long)siz)); ipsecstat.in_inval++; goto bad; } m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]); if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) { ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n", ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); ipsecstat.in_espauthfail++; goto bad; } if (bcmp(sum0, sum, siz) != 0) { ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n", ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); ipsecstat.in_espauthfail++; goto bad; } /* strip off the authentication data */ m_adj(m, -siz); ip = mtod(m, struct ip *); #ifdef IPLEN_FLIPPED ip->ip_len = ip->ip_len - siz; #else ip->ip_len = htons(ntohs(ip->ip_len) - siz); #endif m->m_flags |= M_AUTHIPDGM; ipsecstat.in_espauthsucc++; } /* * update sequence number. */ if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) { if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) { ipsecstat.in_espreplay++; goto bad; } } noreplaycheck: /* process main esp header. 
*/ if (sav->flags & SADB_X_EXT_OLD) { /* RFC 1827 */ esplen = sizeof(struct esp); } else { /* RFC 2406 */ if (sav->flags & SADB_X_EXT_DERIV) esplen = sizeof(struct esp); else esplen = sizeof(struct newesp); } if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) { ipseclog((LOG_WARNING, "IPv4 ESP input: packet too short\n")); ipsecstat.in_inval++; goto bad; } if (m->m_len < off + esplen + ivlen) { m = m_pullup(m, off + esplen + ivlen); if (!m) { ipseclog((LOG_DEBUG, "IPv4 ESP input: can't pullup in esp4_input\n")); ipsecstat.in_inval++; goto bad; } } /* * pre-compute and cache intermediate key */ if (esp_schedule(algo, sav) != 0) { ipsecstat.in_inval++; goto bad; } /* * decrypt the packet. */ if (!algo->decrypt) panic("internal error: no decrypt function"); if ((*algo->decrypt)(m, off, sav, algo, ivlen)) { /* m is already freed */ m = NULL; ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n", ipsec_logsastr(sav))); ipsecstat.in_inval++; goto bad; } ipsecstat.in_esphist[sav->alg_enc]++; m->m_flags |= M_DECRYPTED; /* * find the trailer of the ESP. */ m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail), (caddr_t)&esptail); nxt = esptail.esp_nxt; taillen = esptail.esp_padlen + sizeof(esptail); if (m->m_pkthdr.len < taillen || m->m_pkthdr.len - taillen < off + esplen + ivlen + sizeof(esptail)) { ipseclog((LOG_WARNING, "bad pad length in IPv4 ESP input: %s %s\n", ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); ipsecstat.in_inval++; goto bad; } /* strip off the trailing pad area. */ m_adj(m, -taillen); #ifdef IPLEN_FLIPPED ip->ip_len = ip->ip_len - taillen; #else ip->ip_len = htons(ntohs(ip->ip_len) - taillen); #endif /* was it transmitted over the IPsec tunnel SA? */ if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) { /* * strip off all the headers that precedes ESP header. * IP4 xx ESP IP4' payload -> IP4' payload * * XXX more sanity checks * XXX relationship with gif? 
*/ u_int8_t tos; tos = ip->ip_tos; m_adj(m, off + esplen + ivlen); if (m->m_len < sizeof(*ip)) { m = m_pullup(m, sizeof(*ip)); if (!m) { ipsecstat.in_inval++; goto bad; } } ip = mtod(m, struct ip *); /* ECN consideration. */ ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos); if (!key_checktunnelsanity(sav, AF_INET, (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) { ipseclog((LOG_ERR, "ipsec tunnel address mismatch " "in IPv4 ESP input: %s %s\n", ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); ipsecstat.in_inval++; goto bad; } key_sa_recordxfer(sav, m); if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 || ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) { ipsecstat.in_nomem++; goto bad; } if (netisr_queue(NETISR_IP, m)) { ipsecstat.in_inval++; m = NULL; goto bad; } nxt = IPPROTO_DONE; } else {
/*
 * Decide whether outbound IPsec processing is required for this packet.
 *
 * Looks up the applicable security policy, either from a pending-TDB
 * mbuf tag left by an earlier pass or by consulting the SPD, and
 * detects loops where IPsec processing was already performed.
 *
 * Returns:
 *    1  discard the packet (caller drops it; *error may carry a code)
 *    0  no IPsec handling needed, continue normal output
 *   -1  apply IPsec processing using the policy returned in *sp
 *
 * NOTE(review): despite the ip6_ name this calls ipsec4_checkpolicy and
 * logs about IPv4-over-IPv6 below — presumably shared v4/v6 plumbing;
 * confirm against the callers.  *ifp is currently unused here.
 */
int
ip6_ipsec_output(struct mbuf **m, struct inpcb *inp, int *flags, int *error,
    struct ifnet **ifp, struct secpolicy **sp)
{
#ifdef IPSEC
	struct tdb_ident *tdbi;
	struct m_tag *mtag;
	/* XXX int s; */
	if (sp == NULL)
		return 1;
	/* a pending-TDB tag means a policy was already chosen upstream */
	mtag = m_tag_find(*m, PACKET_TAG_IPSEC_PENDING_TDB, NULL);
	if (mtag != NULL) {
		tdbi = (struct tdb_ident *)(mtag + 1);
		*sp = ipsec_getpolicy(tdbi, IPSEC_DIR_OUTBOUND);
		if (*sp == NULL)
			*error = -EINVAL;	/* force silent drop */
		m_tag_delete(*m, mtag);
	} else {
		/* no tag: consult the SPD for an outbound policy */
		*sp = ipsec4_checkpolicy(*m, IPSEC_DIR_OUTBOUND, *flags,
		    error, inp);
	}

	/*
	 * There are four return cases:
	 *    sp != NULL		    apply IPsec policy
	 *    sp == NULL, error == 0	    no IPsec handling needed
	 *    sp == NULL, error == -EINVAL  discard packet w/o error
	 *    sp == NULL, error != 0	    discard packet, report error
	 */
	if (*sp != NULL) {
		/* Loop detection, check if ipsec processing already done */
		KASSERT((*sp)->req != NULL, ("ip_output: no ipsec request"));
		for (mtag = m_tag_first(*m); mtag != NULL;
		    mtag = m_tag_next(*m, mtag)) {
			if (mtag->m_tag_cookie != MTAG_ABI_COMPAT)
				continue;
			if (mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_DONE &&
			    mtag->m_tag_id !=
			    PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED)
				continue;
			/*
			 * Check if policy has an SA associated with it.
			 * This can happen when an SP has yet to acquire
			 * an SA; e.g. on first reference.  If it occurs,
			 * then we let ipsec4_process_packet do its thing.
			 */
			if ((*sp)->req->sav == NULL)
				break;
			/* same SA as a done-tag means we already ran IPsec */
			tdbi = (struct tdb_ident *)(mtag + 1);
			if (tdbi->spi == (*sp)->req->sav->spi &&
			    tdbi->proto == (*sp)->req->sav->sah->saidx.proto &&
			    bcmp(&tdbi->dst, &(*sp)->req->sav->sah->saidx.dst,
			    sizeof (union sockaddr_union)) == 0) {
				/*
				 * No IPsec processing is needed, free
				 * reference to SP.
				 *
				 * NB: null pointer to avoid free at
				 * done: below.
				 */
				KEY_FREESP(sp), *sp = NULL;
				/* XXX splx(s); */
				goto done;
			}
		}

		/*
		 * Do delayed checksums now because we send before
		 * this is done in the normal processing path.
		 */
		if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
			ipseclog((LOG_DEBUG,
			    "%s: we do not support IPv4 over IPv6", __func__));
#ifdef INET
			in_delayed_cksum(*m);
#endif
			(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
		}

		/*
		 * Preserve KAME behaviour: ENOENT can be returned
		 * when an SA acquire is in progress.  Don't propagate
		 * this to user-level; it confuses applications.
		 *
		 * XXX this will go away when the SADB is redone.
		 */
		if (*error == ENOENT)
			*error = 0;
		goto do_ipsec;
	} else {	/* sp == NULL */
		if (*error != 0) {
			/*
			 * Hack: -EINVAL is used to signal that a packet
			 * should be silently discarded.  This is typically
			 * because we asked key management for an SA and
			 * it was delayed (e.g. kicked up to IKE).
			 */
			if (*error == -EINVAL)
				*error = 0;
			goto bad;
		} else {
			/* No IPsec processing for this packet. */
		}
	}
done:
	return 0;
do_ipsec:
	return -1;
bad:
	return 1;
#endif /* IPSEC */
	/* IPsec compiled out: never request IPsec processing */
	return 0;
}
/* * Modify the packet so that it includes the authentication data. * The mbuf passed must start with IPv4 header. * * assumes that the first mbuf contains IPv4 header + option only. * the function does not modify m. */ int ah4_output(struct mbuf *m, struct ipsecrequest *isr) { struct secasvar *sav = isr->sav; const struct ah_algorithm *algo; u_int32_t spi; u_char *ahdrpos; u_char *ahsumpos = NULL; size_t hlen = 0; /* IP header+option in bytes */ size_t plen = 0; /* AH payload size in bytes */ size_t ahlen = 0; /* plen + sizeof(ah) */ struct ip *ip; struct in_addr dst; struct in_addr *finaldst; int error; /* sanity checks */ if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) { struct ip *ip; ip = mtod(m, struct ip *); ipseclog((LOG_DEBUG, "ah4_output: internal error: " "sav->replay is null: %x->%x, SPI=%u\n", (u_int32_t)ntohl(ip->ip_src.s_addr), (u_int32_t)ntohl(ip->ip_dst.s_addr), (u_int32_t)ntohl(sav->spi))); ipsecstat.out_inval++; m_freem(m); return EINVAL; } algo = ah_algorithm_lookup(sav->alg_auth); if (!algo) { ipseclog((LOG_ERR, "ah4_output: unsupported algorithm: " "SPI=%u\n", (u_int32_t)ntohl(sav->spi))); ipsecstat.out_inval++; m_freem(m); return EINVAL; } spi = sav->spi; /* * determine the size to grow. */ if (sav->flags & SADB_X_EXT_OLD) { /* RFC 1826 */ plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /* XXX pad to 8byte? */ ahlen = plen + sizeof(struct ah); } else { /* RFC 2402 */ plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /* XXX pad to 8byte? */ ahlen = plen + sizeof(struct newah); } /* * grow the mbuf to accomodate AH. 
*/ ip = mtod(m, struct ip *); #ifdef _IP_VHL hlen = IP_VHL_HL(ip->ip_vhl) << 2; #else hlen = ip->ip_hl << 2; #endif if (m->m_len != hlen) panic("ah4_output: assumption failed (first mbuf length)"); if (M_LEADINGSPACE(m->m_next) < ahlen) { struct mbuf *n; MGET(n, MB_DONTWAIT, MT_DATA); if (!n) { ipseclog((LOG_DEBUG, "ENOBUFS in ah4_output %d\n", __LINE__)); m_freem(m); return ENOBUFS; } n->m_len = ahlen; n->m_next = m->m_next; m->m_next = n; m->m_pkthdr.len += ahlen; ahdrpos = mtod(n, u_char *); } else {
static int esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, const struct esp_algorithm *algo, int ivlen) { struct mbuf *s; struct mbuf *d, *d0, *dp; int soff, doff; /* offset from the head of chain, to head of this mbuf */ int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t iv[MAXIVLEN] __attribute__((aligned(4))), *ivp; u_int8_t *sbuf = NULL, *sp, *sp_unaligned; u_int8_t *p, *q; struct mbuf *scut; int scutoff; int i, result = 0; int blocklen; int derived; if (ivlen != sav->ivlen || ivlen > sizeof(iv)) { ipseclog((LOG_ERR, "esp_cbc_decrypt %s: " "unsupported ivlen %d\n", algo->name, ivlen)); m_freem(m); return EINVAL; } /* assumes blocklen == padbound */ blocklen = algo->padbound; #if DIAGNOSTIC if (blocklen > sizeof(iv)) { ipseclog((LOG_ERR, "esp_cbc_decrypt %s: " "unsupported blocklen %d\n", algo->name, blocklen)); m_freem(m); return EINVAL; } #endif if (sav->flags & SADB_X_EXT_OLD) { /* RFC 1827 */ ivoff = off + sizeof(struct esp); bodyoff = off + sizeof(struct esp) + ivlen; derived = 0; } else { /* RFC 2406 */ if (sav->flags & SADB_X_EXT_DERIV) { /* * draft-ietf-ipsec-ciph-des-derived-00.txt * uses sequence number field as IV field. 
*/ ivoff = off + sizeof(struct esp); bodyoff = off + sizeof(struct esp) + sizeof(u_int32_t); ivlen = sizeof(u_int32_t); derived = 1; } else { ivoff = off + sizeof(struct newesp); bodyoff = off + sizeof(struct newesp) + ivlen; derived = 0; } } /* grab iv */ m_copydata(m, ivoff, ivlen, (caddr_t) iv); /* extend iv */ if (ivlen == blocklen) ; else if (ivlen == 4 && blocklen == 8) { bcopy(&iv[0], &iv[4], 4); iv[4] ^= 0xff; iv[5] ^= 0xff; iv[6] ^= 0xff; iv[7] ^= 0xff; } else { ipseclog((LOG_ERR, "esp_cbc_encrypt %s: " "unsupported ivlen/blocklen: %d %d\n", algo->name, ivlen, blocklen)); m_freem(m); return EINVAL; } if (m->m_pkthdr.len < bodyoff) { ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n", algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff)); m_freem(m); return EINVAL; } if ((m->m_pkthdr.len - bodyoff) % blocklen) { ipseclog((LOG_ERR, "esp_cbc_decrypt %s: " "payload length must be multiple of %d\n", algo->name, blocklen)); m_freem(m); return EINVAL; } s = m; d = d0 = dp = NULL; soff = doff = sn = dn = 0; ivp = sp = NULL; /* skip bodyoff */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { sn = bodyoff - soff; break; } soff += s->m_len; s = s->m_next; } scut = s; scutoff = sn; /* skip over empty mbuf */ while (s && s->m_len == 0) s = s->m_next; // Allocate blocksized buffer for unaligned or non-contiguous access sbuf = (u_int8_t *)_MALLOC(blocklen, M_SECA, M_DONTWAIT); if (sbuf == NULL) return ENOBUFS; while (soff < m->m_pkthdr.len) { /* source */ if (sn + blocklen <= s->m_len) { /* body is continuous */ sp = mtod(s, u_int8_t *) + sn; } else {
int esp_cbc_decrypt_aes( struct mbuf *m, size_t off, struct secasvar *sav, const struct esp_algorithm *algo, int ivlen) { struct mbuf *s; struct mbuf *d, *d0, *dp; int soff; /* offset from the head of chain, to head of this mbuf */ int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr; u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL; struct mbuf *scut; int scutoff; int i, len; if (ivlen != AES_BLOCKLEN) { ipseclog((LOG_ERR, "esp_cbc_decrypt %s: " "unsupported ivlen %d\n", algo->name, ivlen)); m_freem(m); return EINVAL; } if (sav->flags & SADB_X_EXT_OLD) { /* RFC 1827 */ ivoff = off + sizeof(struct esp); bodyoff = off + sizeof(struct esp) + ivlen; } else { ivoff = off + sizeof(struct newesp); bodyoff = off + sizeof(struct newesp) + ivlen; } if (m->m_pkthdr.len < bodyoff) { ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n", algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff)); m_freem(m); return EINVAL; } if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) { ipseclog((LOG_ERR, "esp_cbc_decrypt %s: " "payload length must be multiple of %d\n", algo->name, AES_BLOCKLEN)); m_freem(m); return EINVAL; } /* grab iv */ m_copydata(m, ivoff, ivlen, (caddr_t) iv); s = m; soff = sn = dn = 0; d = d0 = dp = NULL; sp = dptr = NULL; /* skip header/IV offset */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { sn = bodyoff - soff; break; } soff += s->m_len; s = s->m_next; } scut = s; scutoff = sn; /* skip over empty mbuf */ while (s && s->m_len == 0) s = s->m_next; while (soff < m->m_pkthdr.len) { /* source */ if (sn + AES_BLOCKLEN <= s->m_len) { /* body is continuous */ sp = mtod(s, u_int8_t *) + sn; len = s->m_len - sn; len -= len % AES_BLOCKLEN; // full blocks only } else {
int esp_schedule(const struct esp_algorithm *algo, struct secasvar *sav) { int error; /* check for key length */ if (_KEYBITS(sav->key_enc) < algo->keymin || _KEYBITS(sav->key_enc) > algo->keymax) { ipseclog((LOG_ERR, "esp_schedule %s: unsupported key length %d: " "needs %d to %d bits\n", algo->name, _KEYBITS(sav->key_enc), algo->keymin, algo->keymax)); return EINVAL; } lck_mtx_lock(sadb_mutex); /* already allocated */ if (sav->sched && sav->schedlen != 0) { lck_mtx_unlock(sadb_mutex); return 0; } /* prevent disallowed implicit IV */ if (((sav->flags & SADB_X_EXT_IIV) != 0) && (sav->alg_enc != SADB_X_EALG_AES_GCM) && (sav->alg_enc != SADB_X_EALG_CHACHA20POLY1305)) { ipseclog((LOG_ERR, "esp_schedule %s: implicit IV not allowed\n", algo->name)); lck_mtx_unlock(sadb_mutex); return EINVAL; } /* no schedule necessary */ if (!algo->schedule || !algo->schedlen) { lck_mtx_unlock(sadb_mutex); return 0; } sav->schedlen = (*algo->schedlen)(algo); if ((signed) sav->schedlen < 0) { lck_mtx_unlock(sadb_mutex); return EINVAL; } //#### that malloc should be replaced by a saved buffer... sav->sched = _MALLOC(sav->schedlen, M_SECA, M_DONTWAIT); if (!sav->sched) { sav->schedlen = 0; lck_mtx_unlock(sadb_mutex); return ENOBUFS; } error = (*algo->schedule)(algo, sav); if (error) { ipseclog((LOG_ERR, "esp_schedule %s: error %d\n", algo->name, error)); bzero(sav->sched, sav->schedlen); FREE(sav->sched, M_SECA); sav->sched = NULL; sav->schedlen = 0; } lck_mtx_unlock(sadb_mutex); return error; }
/* * Modify the packet so that the payload is compressed. * The mbuf (m) must start with IPv4 or IPv6 header. * On failure, free the given mbuf and return non-zero. * * on invocation: * m nexthdrp md * v v v * IP ......... payload * during the encryption: * m nexthdrp mprev md * v v v v * IP ............... ipcomp payload * <-----><-----> * complen plen * <-> hlen * <-----------------> compoff */ static int ipcomp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, struct ipsecrequest *isr, int af) { struct mbuf *n; struct mbuf *md0; struct mbuf *mcopy; struct mbuf *mprev; struct ipcomp *ipcomp; struct secasvar *sav = isr->sav; const struct ipcomp_algorithm *algo; u_int16_t cpi; /* host order */ size_t plen0, plen; /* payload length to be compressed */ size_t compoff; int afnumber; int error = 0; struct ipsecstat *stat; switch (af) { #ifdef INET case AF_INET: afnumber = 4; stat = &ipsecstat; break; #endif #ifdef INET6 case AF_INET6: afnumber = 6; stat = &ipsec6stat; break; #endif default: ipseclog((LOG_ERR, "ipcomp_output: unsupported af %d\n", af)); return 0; /* no change at all */ } /* grab parameters */ algo = ipcomp_algorithm_lookup(sav->alg_enc); if ((ntohl(sav->spi) & ~0xffff) != 0 || !algo) { stat->out_inval++; m_freem(m); return EINVAL; } if ((sav->flags & SADB_X_EXT_RAWCPI) == 0) cpi = sav->alg_enc; else cpi = ntohl(sav->spi) & 0xffff; /* compute original payload length */ plen = 0; for (n = md; n; n = n->m_next) plen += n->m_len; /* if the payload is short enough, we don't need to compress */ if (plen < algo->minplen) return 0; /* * retain the original packet for two purposes: * (1) we need to backout our changes when compression is not necessary. * (2) byte lifetime computation should use the original packet. * see RFC2401 page 23. * compromise two m_copym(). we will be going through every byte of * the payload during compression process anyways. 
*/ mcopy = m_copym(m, 0, M_COPYALL, MB_DONTWAIT); if (mcopy == NULL) { error = ENOBUFS; return 0; } md0 = m_copym(md, 0, M_COPYALL, MB_DONTWAIT); if (md0 == NULL) { m_freem(mcopy); error = ENOBUFS; return 0; } plen0 = plen; /* make the packet over-writable */ for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) ; if (mprev == NULL || mprev->m_next != md) { ipseclog((LOG_DEBUG, "ipcomp%d_output: md is not in chain\n", afnumber)); stat->out_inval++; m_freem(m); m_freem(md0); m_freem(mcopy); return EINVAL; } mprev->m_next = NULL; if ((md = ipsec_copypkt(md)) == NULL) { m_freem(m); m_freem(md0); m_freem(mcopy); error = ENOBUFS; goto fail; } mprev->m_next = md; /* compress data part */ if ((*algo->compress)(m, md, &plen) || mprev->m_next == NULL) { ipseclog((LOG_ERR, "packet compression failure\n")); m = NULL; m_freem(md0); m_freem(mcopy); stat->out_inval++; error = EINVAL; goto fail; } stat->out_comphist[sav->alg_enc]++; md = mprev->m_next; /* * if the packet became bigger, meaningless to use IPComp. * we've only wasted our cpu time. */ if (plen0 < plen) { m_freem(md); m_freem(mcopy); mprev->m_next = md0; return 0; } /* * no need to backout change beyond here. */ m_freem(md0); md0 = NULL; m->m_pkthdr.len -= plen0; m->m_pkthdr.len += plen; { /* * insert IPComp header. */ #ifdef INET struct ip *ip = NULL; #endif size_t complen = sizeof(struct ipcomp); switch (af) { #ifdef INET case AF_INET: ip = mtod(m, struct ip *); break; #endif #ifdef INET6 case AF_INET6: break; #endif } compoff = m->m_pkthdr.len - plen; /* * grow the mbuf to accomodate ipcomp header. * before: IP ... payload * after: IP ... ipcomp payload */ if (M_LEADINGSPACE(md) < complen) { MGET(n, MB_DONTWAIT, MT_DATA); if (!n) { m_freem(m); error = ENOBUFS; goto fail; } n->m_len = complen; mprev->m_next = n; n->m_next = md; m->m_pkthdr.len += complen; ipcomp = mtod(n, struct ipcomp *); } else {
/* * Modify the packet so that the payload is encrypted. * The mbuf (m) must start with IPv4 or IPv6 header. * On failure, free the given mbuf and return NULL. * * on invocation: * m nexthdrp md * v v v * IP ......... payload * during the encryption: * m nexthdrp mprev md * v v v v * IP ............... esp iv payload pad padlen nxthdr * <--><-><------><---------------> * esplen plen extendsiz * ivlen * <-----> esphlen * <-> hlen * <-----------------> espoff */ static int esp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, struct ipsecrequest *isr, int af) { struct mbuf *n; struct mbuf *mprev; struct esp *esp; struct esptail *esptail; struct secasvar *sav = isr->sav; const struct esp_algorithm *algo; u_int32_t spi; u_int8_t nxt = 0; size_t plen; /* payload length to be encrypted */ size_t espoff; int ivlen; int afnumber; size_t extendsiz; int error = 0; struct ipsecstat *stat; switch (af) { #ifdef INET case AF_INET: afnumber = 4; stat = &ipsecstat; break; #endif #ifdef INET6 case AF_INET6: afnumber = 6; stat = &ipsec6stat; break; #endif default: ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af)); return 0; /* no change at all */ } /* some sanity check */ if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) { switch (af) { #ifdef INET case AF_INET: { struct ip *ip; ip = mtod(m, struct ip *); ipseclog((LOG_DEBUG, "esp4_output: internal error: " "sav->replay is null: %x->%x, SPI=%u\n", (u_int32_t)ntohl(ip->ip_src.s_addr), (u_int32_t)ntohl(ip->ip_dst.s_addr), (u_int32_t)ntohl(sav->spi))); ipsecstat.out_inval++; break; } #endif /* INET */ #ifdef INET6 case AF_INET6: ipseclog((LOG_DEBUG, "esp6_output: internal error: " "sav->replay is null: SPI=%u\n", (u_int32_t)ntohl(sav->spi))); ipsec6stat.out_inval++; break; #endif /* INET6 */ default: panic("esp_output: should not reach here"); } m_freem(m); return EINVAL; } algo = esp_algorithm_lookup(sav->alg_enc); if (!algo) { ipseclog((LOG_ERR, "esp_output: unsupported algorithm: " "SPI=%u\n", 
(u_int32_t)ntohl(sav->spi))); m_freem(m); return EINVAL; } spi = sav->spi; ivlen = sav->ivlen; /* should be okey */ if (ivlen < 0) { panic("invalid ivlen"); } { /* * insert ESP header. * XXX inserts ESP header right after IPv4 header. should * chase the header chain. * XXX sequential number */ #ifdef INET struct ip *ip = NULL; #endif size_t esplen; /* sizeof(struct esp/newesp) */ size_t esphlen; /* sizeof(struct esp/newesp) + ivlen */ if (sav->flags & SADB_X_EXT_OLD) { /* RFC 1827 */ esplen = sizeof(struct esp); } else { /* RFC 2406 */ if (sav->flags & SADB_X_EXT_DERIV) esplen = sizeof(struct esp); else esplen = sizeof(struct newesp); } esphlen = esplen + ivlen; for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) ; if (mprev == NULL || mprev->m_next != md) { ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n", afnumber)); m_freem(m); return EINVAL; } plen = 0; for (n = md; n; n = n->m_next) plen += n->m_len; switch (af) { #ifdef INET case AF_INET: ip = mtod(m, struct ip *); break; #endif #ifdef INET6 case AF_INET6: break; #endif } /* make the packet over-writable */ mprev->m_next = NULL; if ((md = ipsec_copypkt(md)) == NULL) { m_freem(m); error = ENOBUFS; goto fail; } mprev->m_next = md; espoff = m->m_pkthdr.len - plen; /* * grow the mbuf to accomodate ESP header. * before: IP ... payload * after: IP ... ESP IV payload */ if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT)) { MGET(n, M_NOWAIT, MT_DATA); if (!n) { m_freem(m); error = ENOBUFS; goto fail; } n->m_len = esphlen; mprev->m_next = n; n->m_next = md; m->m_pkthdr.len += esphlen; esp = mtod(n, struct esp *); } else {
/*
 * Modify the packet so that the payload is encrypted.
 * The mbuf (m) must start with IPv4 or IPv6 header.
 * On failure, free the given mbuf and return NULL.
 *
 * on invocation:
 *	m   nexthdrp md
 *	v   v        v
 *	IP ......... payload
 * during the encryption:
 *	m   nexthdrp mprev md
 *	v   v        v     v
 *	IP ............... esp iv payload pad padlen nxthdr
 *	                   <--><-><------><--------------->
 *	                   esplen plen    extendsiz
 *	                       ivlen
 *	<-----> esphlen
 *	<-> hlen
 *	<-----------------> espoff
 *
 * NOTE(review): xnu variant of esp_output() with NAT-T/UDP encapsulation
 * support; truncated in this chunk at the trailing else-branch.
 */
static int
esp_output(
	struct mbuf *m,
	u_char *nexthdrp,
	struct mbuf *md,
	int af,
	struct secasvar *sav)
{
	struct mbuf *n;
	struct mbuf *mprev;
	struct esp *esp;
	struct esptail *esptail;
	const struct esp_algorithm *algo;
	u_int32_t spi;
	u_int8_t nxt = 0;
	size_t plen;	/*payload length to be encrypted*/
	size_t espoff;
	size_t esphlen;	/* sizeof(struct esp/newesp) + ivlen */
	int ivlen;
	int afnumber;	/* 4 or 6, for log messages */
	size_t extendsiz;
	int error = 0;
	struct ipsecstat *stat;
	struct udphdr *udp = NULL;
	/* UDP-encapsulate ESP (NAT traversal) when the SA asks for it and a
	 * global encap port is configured */
	int udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && (af == AF_INET || af == AF_INET6) &&
	    (esp_udp_encap_port & 0xFFFF) != 0);

	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen,0,0,0,0);
	switch (af) {
#if INET
	case AF_INET:
		afnumber = 4;
		stat = &ipsecstat;
		break;
#endif
#if INET6
	case AF_INET6:
		afnumber = 6;
		stat = &ipsec6stat;
		break;
#endif
	default:
		ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af));
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1,0,0,0,0);
		return 0;	/* no change at all */
	}

	/* some sanity check */
	/* new-style (RFC 2406) SAs must carry replay state */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) {
		switch (af) {
#if INET
		case AF_INET:
		    {
			struct ip *ip;

			ip = mtod(m, struct ip *);
			ipseclog((LOG_DEBUG, "esp4_output: internal error: "
			    "sav->replay is null: %x->%x, SPI=%u\n",
			    (u_int32_t)ntohl(ip->ip_src.s_addr),
			    (u_int32_t)ntohl(ip->ip_dst.s_addr),
			    (u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			break;
		    }
#endif /*INET*/
#if INET6
		case AF_INET6:
			ipseclog((LOG_DEBUG, "esp6_output: internal error: "
			    "sav->replay is null: SPI=%u\n",
			    (u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
			break;
#endif /*INET6*/
		default:
			panic("esp_output: should not reach here");
		}
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2,0,0,0,0);
		return EINVAL;
	}

	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_ERR, "esp_output: unsupported algorithm: "
		    "SPI=%u\n", (u_int32_t)ntohl(sav->spi)));
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3,0,0,0,0);
		return EINVAL;
	}
	spi = sav->spi;
	ivlen = sav->ivlen;
	/* should be okey */
	if (ivlen < 0) {
		panic("invalid ivlen");
	}

    {
	/*
	 * insert ESP header.
	 * XXX inserts ESP header right after IPv4 header. should
	 * chase the header chain.
	 * XXX sequential number
	 */
#if INET
	struct ip *ip = NULL;
#endif
#if INET6
	struct ip6_hdr *ip6 = NULL;
#endif
	size_t esplen;	/* sizeof(struct esp/newesp) */
	size_t hlen = 0;	/* ip header len */

	/* old-style RFC 1827 / DERIV SAs use the short 'esp' header,
	 * otherwise the RFC 2406 'newesp' header (adds a sequence number) */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}
	esphlen = esplen + ivlen;

	/* locate mprev: the mbuf immediately preceding md in the chain */
	for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next)
		;
	if (mprev == NULL || mprev->m_next != md) {
		ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",
		    afnumber));
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4,0,0,0,0);
		return EINVAL;
	}

	/* plen = total length of the to-be-encrypted payload (md onward) */
	plen = 0;
	for (n = md; n; n = n->m_next)
		plen += n->m_len;
	switch (af) {
#if INET
	case AF_INET:
		ip = mtod(m, struct ip *);
#ifdef _IP_VHL
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
		hlen = ip->ip_hl << 2;
#endif
		break;
#endif
#if INET6
	case AF_INET6:
		ip6 = mtod(m, struct ip6_hdr *);
		hlen = sizeof(*ip6);
		break;
#endif
	}

	/* make the packet over-writable */
	mprev->m_next = NULL;
	if ((md = ipsec_copypkt(md)) == NULL) {
		m_freem(m);
		error = ENOBUFS;
		goto fail;
	}
	mprev->m_next = md;

	/*
	 * Translate UDP source port back to its original value.
	 * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transort mode.
	 */
	if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
		/* if not UDP - drop it */
		if (ip->ip_p != IPPROTO_UDP) {
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			m_freem(m);
			error = EINVAL;
			goto fail;
		}

		udp = mtod(md, struct udphdr *);

		/* if src port not set in sav - find it */
		if (sav->natt_encapsulated_src_port == 0)
			if (key_natt_get_translated_port(sav) == 0) {
				m_freem(m);
				error = EINVAL;
				goto fail;
			}
		/* NOTE(review): remote_ike_port is compared against
		 * htons(uh_dport) — presumably remote_ike_port is kept in
		 * host byte order; confirm against where it is set. */
		if (sav->remote_ike_port == htons(udp->uh_dport)) {
			/* translate UDP port */
			udp->uh_dport = sav->natt_encapsulated_src_port;
			udp->uh_sum = 0; /* don't need checksum with ESP auth */
		} else {
			/* drop the packet - can't translate the port */
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			m_freem(m);
			error = EINVAL;
			goto fail;
		}
	}

	/* byte offset of the ESP header from the start of the packet */
	espoff = m->m_pkthdr.len - plen;

	if (udp_encapsulate) {
		/* leave room for the encapsulating UDP header as well */
		esphlen += sizeof(struct udphdr);
		espoff += sizeof(struct udphdr);
	}

	/*
	 * grow the mbuf to accomodate ESP header.
	 * before: IP ... payload
	 * after:  IP ... [UDP] ESP IV payload
	 */
	/* If md has no usable leading space (or is externally backed),
	 * prepend a fresh mbuf to hold [UDP +] ESP header + IV. */
	if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) {
		MGET(n, M_DONTWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			error = ENOBUFS;
			goto fail;
		}
		n->m_len = esphlen;
		mprev->m_next = n;
		n->m_next = md;
		m->m_pkthdr.len += esphlen;
		if (udp_encapsulate) {
			udp = mtod(n, struct udphdr *);
			esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
		} else {
			esp = mtod(n, struct esp *);
		}
	} else {
	/* NOTE(review): truncated here — the original continues by pulling
	 * the header into md's leading space. */
/*
 * IPv4 AH input processing (xnu variant).
 * 'm' starts with the IPv4 header; 'off' is the byte offset of the AH
 * header within the packet.
 *
 * Visible here: pullup of the AH header, SA lookup by SPI, SA state and
 * algorithm validation, and the first round of header-length sanity checks.
 * NOTE(review): truncated in this chunk — ICV verification and header
 * stripping follow in the original file.
 */
void
ah4_input(struct mbuf *m, int off)
{
	struct ip *ip;
	struct ah *ah;
	u_int32_t spi;
	const struct ah_algorithm *algo;
	size_t siz;	/* ICV size reported by the algorithm */
	size_t siz1;	/* siz rounded up to a 4-byte boundary */
	u_char *cksum;
	struct secasvar *sav = NULL;
	u_int16_t nxt;
	size_t hlen;	/* IP header length, options included */
	size_t stripsiz = 0;
	sa_family_t ifamily;

	/* make sure the fixed new-style AH header is contiguous */
	if (m->m_len < off + sizeof(struct newah)) {
		m = m_pullup(m, off + sizeof(struct newah));
		if (!m) {
			ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup;"
			    "dropping the packet for simplicity\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto fail;
		}
	}

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
	ah = (struct ah *)(void *)(((caddr_t)ip) + off);
	nxt = ah->ah_nxt;
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = ah->ah_spi;

	if ((sav = key_allocsa(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	    IPPROTO_AH, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 AH input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
		goto fail;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP ah4_input called to allocate SA:0x%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	/* only MATURE or DYING SAs may be used for inbound processing */
	if (sav->state != SADB_SASTATE_MATURE
	 && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 AH input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto fail;
	}

	algo = ah_algorithm_lookup(sav->alg_auth);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 AH input: "
		    "unsupported authentication algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto fail;
	}

	siz = (*algo->sumsiz)(sav);
	siz1 = ((siz + 3) & ~(4 - 1));	/* round up to 32-bit multiple */

	/*
	 * sanity checks for header, 1.
	 */
    {
	int sizoff;

	/* new-style AH carries a 4-byte sequence number before the ICV */
	sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4;

	/*
	 * Here, we do not do "siz1 == siz". This is because the way
	 * RFC240[34] section 2 is written. They do not require truncation
	 * to 96 bits.
	 * For example, Microsoft IPsec stack attaches 160 bits of
	 * authentication data for both hmac-md5 and hmac-sha1. For hmac-sha1,
	 * 32 bits of padding is attached.
	 *
	 * There are two downsides to this specification.
	 * They have no real harm, however, they leave us fuzzy feeling.
	 * - if we attach more than 96 bits of authentication data onto AH,
	 *   we will never notice about possible modification by rogue
	 *   intermediate nodes.
	 *   Since extra bits in AH checksum is never used, this constitutes
	 *   no real issue, however, it is wacky.
	 * - even if the peer attaches big authentication data, we will never
	 *   notice the difference, since longer authentication data will just
	 *   work.
	 *
	 * We may need some clarification in the spec.
	 */
	if (siz1 < siz) {
		ipseclog((LOG_NOTICE, "sum length too short in IPv4 AH input "
		    "(%lu, should be at least %lu): %s\n",
		    (u_int32_t)siz1, (u_int32_t)siz,
		    ipsec4_logpacketstr(ip, spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto fail;
	}
	/* ah_len counts 32-bit words past the fixed header */
	if ((ah->ah_len << 2) - sizoff != siz1) {
		ipseclog((LOG_NOTICE, "sum length mismatch in IPv4 AH input "
		    "(%d should be %lu): %s\n",
		    (ah->ah_len << 2) - sizoff, (u_int32_t)siz1,
		    ipsec4_logpacketstr(ip, spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto fail;
	}

	/* pull up the full AH header including the ICV */
	if (m->m_len < off + sizeof(struct ah) + sizoff + siz1) {
		m = m_pullup(m, off + sizeof(struct ah) + sizoff + siz1);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 AH input: can't pullup\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto fail;
		}
		/* Expect 32-bit aligned data ptr on strict-align platforms */
		MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

		ip = mtod(m, struct ip *);
		ah = (struct ah *)(void *)(((caddr_t)ip) + off);
	}
    }
/* * For OUTBOUND packet having a socket. Searching SPD for packet, * and return a pointer to SP. * OUT: NULL: no apropreate SP found, the following value is set to error. * 0 : bypass * EACCES : discard packet. * ENOENT : ipsec_acquire() in progress, maybe. * others : error occured. * others: a pointer to SP * * NOTE: IPv6 mapped adddress concern is implemented here. */ static struct secpolicy * ipsec_getpolicybysock(struct mbuf *m, u_int dir, struct inpcb *inp, int *error) { struct inpcbpolicy *pcbsp; struct secpolicy *currsp = NULL; /* Policy on socket. */ struct secpolicy *sp; IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(inp != NULL, ("null inpcb")); IPSEC_ASSERT(error != NULL, ("null error")); IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND, ("invalid direction %u", dir)); /* Set spidx in pcb. */ *error = ipsec_setspidx_inpcb(m, inp); if (*error) return (NULL); pcbsp = inp->inp_sp; IPSEC_ASSERT(pcbsp != NULL, ("null pcbsp")); switch (dir) { case IPSEC_DIR_INBOUND: currsp = pcbsp->sp_in; break; case IPSEC_DIR_OUTBOUND: currsp = pcbsp->sp_out; break; } IPSEC_ASSERT(currsp != NULL, ("null currsp")); if (pcbsp->priv) { /* When privilieged socket. */ switch (currsp->policy) { case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_IPSEC: key_addref(currsp); sp = currsp; break; case IPSEC_POLICY_ENTRUST: /* Look for a policy in SPD. */ sp = KEY_ALLOCSP(&currsp->spidx, dir); if (sp == NULL) /* No SP found. */ sp = KEY_ALLOCSP_DEFAULT(); break; default: ipseclog((LOG_ERR, "%s: Invalid policy for PCB %d\n", __func__, currsp->policy)); *error = EINVAL; return (NULL); } } else { /* Unpriv, SPD has policy. */ sp = KEY_ALLOCSP(&currsp->spidx, dir); if (sp == NULL) { /* No SP found. 
*/ switch (currsp->policy) { case IPSEC_POLICY_BYPASS: ipseclog((LOG_ERR, "%s: Illegal policy for " "non-priviliged defined %d\n", __func__, currsp->policy)); *error = EINVAL; return (NULL); case IPSEC_POLICY_ENTRUST: sp = KEY_ALLOCSP_DEFAULT(); break; case IPSEC_POLICY_IPSEC: key_addref(currsp); sp = currsp; break; default: ipseclog((LOG_ERR, "%s: Invalid policy for " "PCB %d\n", __func__, currsp->policy)); *error = EINVAL; return (NULL); } } } IPSEC_ASSERT(sp != NULL, ("null SP (priv %u policy %u", pcbsp->priv, currsp->policy)); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s (priv %u policy %u) allocate SP:%p (refcnt %u)\n", __func__, pcbsp->priv, currsp->policy, sp, sp->refcnt)); return (sp); }
static int esp_cbc_mature(struct secasvar *sav) { int keylen; const struct esp_algorithm *algo; if (sav->flags & SADB_X_EXT_OLD) { ipseclog((LOG_ERR, "esp_cbc_mature: algorithm incompatible with esp-old\n")); return 1; } if (sav->flags & SADB_X_EXT_DERIV) { ipseclog((LOG_ERR, "esp_cbc_mature: algorithm incompatible with derived\n")); return 1; } if (!sav->key_enc) { ipseclog((LOG_ERR, "esp_cbc_mature: no key is given.\n")); return 1; } algo = esp_algorithm_lookup(sav->alg_enc); if (!algo) { ipseclog((LOG_ERR, "esp_cbc_mature: unsupported algorithm.\n")); return 1; } keylen = sav->key_enc->sadb_key_bits; if (keylen < algo->keymin || algo->keymax < keylen) { ipseclog((LOG_ERR, "esp_cbc_mature %s: invalid key length %d.\n", algo->name, sav->key_enc->sadb_key_bits)); return 1; } switch (sav->alg_enc) { case SADB_EALG_3DESCBC: /* weak key check */ if (des_is_weak_key((des_cblock *)_KEYBUF(sav->key_enc)) || des_is_weak_key((des_cblock *)(_KEYBUF(sav->key_enc) + 8)) || des_is_weak_key((des_cblock *)(_KEYBUF(sav->key_enc) + 16))) { ipseclog((LOG_ERR, "esp_cbc_mature %s: weak key was passed.\n", algo->name)); return 1; } break; case SADB_X_EALG_RIJNDAELCBC: /* allows specific key sizes only */ if (!(keylen == 128 || keylen == 192 || keylen == 256)) { ipseclog((LOG_ERR, "esp_cbc_mature %s: invalid key length %d.\n", algo->name, keylen)); return 1; } break; } return 0; }