static int
g_eli_crypto_cipher(u_int algo, int enc, u_char *data, size_t datasize,
    const u_char *key, size_t keysize)
{
	struct cryptoini cri;
	struct cryptop *crp;
	struct cryptodesc *crd;
	uint64_t sid;
	u_char *p;
	int error;

	KASSERT(algo != CRYPTO_AES_XTS,
	    ("%s: CRYPTO_AES_XTS unexpected here", __func__));

	bzero(&cri, sizeof(cri));
	cri.cri_alg = algo;
	cri.cri_key = __DECONST(void *, key);
	cri.cri_klen = keysize;
	error = crypto_newsession(&sid, &cri, CRYPTOCAP_F_SOFTWARE);
	if (error != 0)
		return (error);
	p = malloc(sizeof(*crp) + sizeof(*crd), M_ELI, M_NOWAIT | M_ZERO);
	if (p == NULL) {
		crypto_freesession(sid);
		return (ENOMEM);
	}
	crp = (struct cryptop *)p;	p += sizeof(*crp);
	crd = (struct cryptodesc *)p;	p += sizeof(*crd);

	crd->crd_skip = 0;
	crd->crd_len = datasize;
	crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
	if (enc)
		crd->crd_flags |= CRD_F_ENCRYPT;
	crd->crd_alg = algo;
	crd->crd_key = __DECONST(void *, key);
	crd->crd_klen = keysize;
	bzero(crd->crd_iv, sizeof(crd->crd_iv));
	crd->crd_next = NULL;

	crp->crp_sid = sid;
	crp->crp_ilen = datasize;
	crp->crp_olen = datasize;
	crp->crp_opaque = NULL;
	crp->crp_callback = g_eli_crypto_done;
	crp->crp_buf = (void *)data;
	crp->crp_flags = CRYPTO_F_CBIFSYNC;
	crp->crp_desc = crd;

	error = crypto_dispatch(crp);
	if (error == 0) {
		while (crp->crp_opaque == NULL)
			tsleep(crp, PRIBIO, "geli", hz / 5);
		error = crp->crp_etype;
	}

	free(crp, M_ELI);
	crypto_freesession(sid);
	return (error);
}
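/*
 * The dispatch loop above sleeps until crp_opaque becomes non-NULL, so the
 * completion side must set that field and wake the sleeper.  The actual
 * g_eli_crypto_done callback is not reproduced in this excerpt; the sketch
 * below is a minimal handler consistent with the waiting loop.  Treating
 * crp_opaque as the "done" marker is an assumption drawn solely from the
 * loop condition above, not from code shown here.
 */
static int
g_eli_crypto_done(struct cryptop *crp)
{

	crp->crp_opaque = (void *)crp;	/* Assumed "done" marker. */
	wakeup(crp);			/* Wake the tsleep()ing caller. */
	return (0);
}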
static void
ocf_request(void *arg)
{
	request_t *r = arg;
	struct cryptop *crp = crypto_getreq(2);
	struct cryptodesc *crde, *crda;
	unsigned long flags;

	if (!crp) {
		spin_lock_irqsave(&ocfbench_counter_lock, flags);
		outstanding--;
		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
		return;
	}
	crde = crp->crp_desc;
	crda = crde->crd_next;

	crda->crd_skip = 0;
	crda->crd_flags = 0;
	crda->crd_len = request_size;
	crda->crd_inject = request_size;
	crda->crd_alg = CRYPTO_SHA1_HMAC;
	crda->crd_key = "0123456789abcdefghij";
	crda->crd_klen = 20 * 8;

	crde->crd_skip = 0;
	crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
	crde->crd_len = request_size;
	crde->crd_inject = request_size;
	//crde->crd_alg = CRYPTO_3DES_CBC;
	crde->crd_alg = CRYPTO_AES_CBC;
	crde->crd_key = "0123456789abcdefghijklmn";
	crde->crd_klen = 24 * 8;

	crp->crp_ilen = request_size + 64;
	crp->crp_flags = 0;
	if (request_batch)
		crp->crp_flags |= CRYPTO_F_BATCH;
	if (request_cbimm)
		crp->crp_flags |= CRYPTO_F_CBIMM;
	crp->crp_buf = (caddr_t) r->buffer;
	crp->crp_callback = ocf_cb;
	crp->crp_sid = ocf_cryptoid;
	crp->crp_opaque = (caddr_t) r;
	crypto_dispatch(crp);
}
static int
aes_crypto_cb(struct cryptop *crp)
{
	int error;
	struct aes_state *as = (struct aes_state *) crp->crp_opaque;

	if (CRYPTO_SESID2CAPS(as->as_session) & CRYPTOCAP_F_SYNC)
		return (0);

	error = crp->crp_etype;
	if (error == EAGAIN)
		error = crypto_dispatch(crp);
	mtx_lock(&as->as_lock);
	if (error || (crp->crp_flags & CRYPTO_F_DONE))
		wakeup(crp);
	mtx_unlock(&as->as_lock);

	return (0);
}
static void
des3_encrypt_1(const struct krb5_key_state *ks, struct mbuf *inout,
    size_t skip, size_t len, void *ivec, int encdec)
{
	struct des3_state *ds = ks->ks_priv;
	struct cryptop *crp;
	struct cryptodesc *crd;
	int error;

	crp = crypto_getreq(1);
	crd = crp->crp_desc;

	crd->crd_skip = skip;
	crd->crd_len = len;
	crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT | encdec;
	if (ivec) {
		bcopy(ivec, crd->crd_iv, 8);
	} else {
		bzero(crd->crd_iv, 8);
	}
	crd->crd_next = NULL;
	crd->crd_alg = CRYPTO_3DES_CBC;

	crp->crp_sid = ds->ds_session;
	crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
	crp->crp_buf = (void *) inout;
	crp->crp_opaque = (void *) ds;
	crp->crp_callback = des3_crypto_cb;

	error = crypto_dispatch(crp);

	if ((CRYPTO_SESID2CAPS(ds->ds_session) & CRYPTOCAP_F_SYNC) == 0) {
		mtx_lock(&ds->ds_lock);
		if (!error && !(crp->crp_flags & CRYPTO_F_DONE))
			error = msleep(crp, &ds->ds_lock, 0, "gssdes3", 0);
		mtx_unlock(&ds->ds_lock);
	}

	crypto_freereq(crp);
}
static void
aes_encrypt_1(const struct krb5_key_state *ks, int buftype, void *buf,
    size_t skip, size_t len, void *ivec, int encdec)
{
	struct aes_state *as = ks->ks_priv;
	struct cryptop *crp;
	struct cryptodesc *crd;
	int error;

	crp = crypto_getreq(1);
	crd = crp->crp_desc;

	crd->crd_skip = skip;
	crd->crd_len = len;
	crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT | encdec;
	if (ivec) {
		bcopy(ivec, crd->crd_iv, 16);
	} else {
		bzero(crd->crd_iv, 16);
	}
	crd->crd_next = NULL;
	crd->crd_alg = CRYPTO_AES_CBC;

	crp->crp_sid = as->as_session;
	crp->crp_flags = buftype | CRYPTO_F_CBIFSYNC;
	crp->crp_buf = buf;
	crp->crp_opaque = (void *) as;
	crp->crp_callback = aes_crypto_cb;

	error = crypto_dispatch(crp);

	if ((CRYPTO_SESID2CAPS(as->as_session) & CRYPTOCAP_F_SYNC) == 0) {
		mtx_lock(&as->as_lock);
		if (!error && !(crp->crp_flags & CRYPTO_F_DONE))
			error = msleep(crp, &as->as_lock, 0, "gssaes", 0);
		mtx_unlock(&as->as_lock);
	}

	crypto_freereq(crp);
}
static void
ocf_request(void *arg)
{
	request_t *r = arg;
	struct cryptop *crp = crypto_getreq(2);
	struct cryptodesc *crde, *crda;

	if (!crp) {
		outstanding--;
		return;
	}
	crde = crp->crp_desc;
	crda = crde->crd_next;

	crda->crd_skip = 0;
	crda->crd_flags = 0;
	crda->crd_len = request_size;
	crda->crd_inject = request_size;
	crda->crd_alg = CRYPTO_SHA1_HMAC;
	crda->crd_key = "0123456789abcdefghij";
	crda->crd_klen = 20 * 8;

	crde->crd_skip = 0;
	crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
	crde->crd_len = request_size;
	crde->crd_inject = request_size;
	crde->crd_alg = CRYPTO_3DES_CBC;
	crde->crd_key = "0123456789abcdefghijklmn";
	crde->crd_klen = 24 * 8;

	crp->crp_ilen = request_size + 64;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crp->crp_buf = (caddr_t) r->buffer;
	crp->crp_callback = ocf_cb;
	crp->crp_sid = ocf_cryptoid;
	crp->crp_opaque = (caddr_t) r;
	crypto_dispatch(crp);
}
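/*
 * Both ocf_request() variants hand their request_t to the driver through
 * crp_opaque and name ocf_cb as the completion callback, but the callback
 * itself is not included in this excerpt.  The sketch below is only a
 * guess at the minimum such a handler has to do -- reclaim the cryptop and
 * retire the in-flight counter; the real benchmark callback may well also
 * resubmit the request to keep the benchmark running.
 */
static int
ocf_cb(struct cryptop *crp)
{
	request_t *r = (request_t *) crp->crp_opaque;

	if (crp->crp_etype)
		printk("ocf_cb: request %p failed, error %d\n",
		    r, crp->crp_etype);

	crypto_freereq(crp);	/* Return descriptors to the pool. */
	outstanding--;		/* Retire this in-flight request. */
	return 0;
}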
static void
aes_checksum(const struct krb5_key_state *ks, int usage, struct mbuf *inout,
    size_t skip, size_t inlen, size_t outlen)
{
	struct aes_state *as = ks->ks_priv;
	struct cryptop *crp;
	struct cryptodesc *crd;
	int error;

	crp = crypto_getreq(1);
	crd = crp->crp_desc;

	crd->crd_skip = skip;
	crd->crd_len = inlen;
	crd->crd_inject = skip + inlen;
	crd->crd_flags = 0;
	crd->crd_next = NULL;
	crd->crd_alg = CRYPTO_SHA1_HMAC;

	crp->crp_sid = as->as_session;
	crp->crp_ilen = inlen;
	crp->crp_olen = 12;
	crp->crp_etype = 0;
	crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
	crp->crp_buf = (void *) inout;
	crp->crp_opaque = (void *) as;
	crp->crp_callback = aes_crypto_cb;

	error = crypto_dispatch(crp);

	if ((CRYPTO_SESID2CAPS(as->as_session) & CRYPTOCAP_F_SYNC) == 0) {
		mtx_lock(&as->as_lock);
		if (!error && !(crp->crp_flags & CRYPTO_F_DONE))
			error = msleep(crp, &as->as_lock, 0, "gssaes", 0);
		mtx_unlock(&as->as_lock);
	}

	crypto_freereq(crp);
}
/*
 * ESP input processing, called (eventually) through the protocol switch.
 */
static int
esp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff)
{
	struct auth_hash *esph;
	struct enc_xform *espx;
	struct tdb_ident *tdbi;
	struct tdb_crypto *tc;
	int plen, alen, hlen;
	struct m_tag *mtag;
	struct newesp *esp;
	struct cryptodesc *crde;
	struct cryptop *crp;

	IPSEC_ASSERT(sav != NULL, ("null SA"));
	IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("null encoding xform"));

	/* Valid IP Packet length ? */
	if ((skip & 3) || (m->m_pkthdr.len & 3)) {
		DPRINTF(("%s: misaligned packet, skip %u pkt len %u",
		    __func__, skip, m->m_pkthdr.len));
		ESPSTAT_INC(esps_badilen);
		m_freem(m);
		return EINVAL;
	}
	/* XXX don't pullup, just copy header */
	IP6_EXTHDR_GET(esp, struct newesp *, m, skip, sizeof (struct newesp));

	esph = sav->tdb_authalgxform;
	espx = sav->tdb_encalgxform;

	/* Determine the ESP header length */
	if (sav->flags & SADB_X_EXT_OLD)
		hlen = sizeof (struct esp) + sav->ivlen;
	else
		hlen = sizeof (struct newesp) + sav->ivlen;
	/* Authenticator hash size */
	if (esph != NULL) {
		switch (esph->type) {
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			alen = esph->hashsize / 2;
			break;
		default:
			alen = AH_HMAC_HASHLEN;
			break;
		}
	} else
		alen = 0;

	/*
	 * Verify payload length is multiple of encryption algorithm
	 * block size.
	 *
	 * NB: This works for the null algorithm because the blocksize
	 *     is 4 and all packets must be 4-byte aligned regardless
	 *     of the algorithm.
	 */
	plen = m->m_pkthdr.len - (skip + hlen + alen);
	if ((plen & (espx->blocksize - 1)) || (plen <= 0)) {
		DPRINTF(("%s: payload of %d octets not a multiple of %d octets,"
		    " SA %s/%08lx\n", __func__, plen, espx->blocksize,
		    ipsec_address(&sav->sah->saidx.dst),
		    (u_long) ntohl(sav->spi)));
		ESPSTAT_INC(esps_badilen);
		m_freem(m);
		return EINVAL;
	}

	/*
	 * Check sequence number.
	 */
	if (esph && sav->replay && !ipsec_chkreplay(ntohl(esp->esp_seq), sav)) {
		DPRINTF(("%s: packet replay check for %s\n", __func__,
		    ipsec_logsastr(sav)));	/*XXX*/
		ESPSTAT_INC(esps_replay);
		m_freem(m);
		return ENOBUFS;		/*XXX*/
	}

	/* Update the counters */
	ESPSTAT_ADD(esps_ibytes, m->m_pkthdr.len - (skip + hlen + alen));

	/* Find out if we've already done crypto */
	for (mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, NULL);
	     mtag != NULL;
	     mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, mtag)) {
		tdbi = (struct tdb_ident *) (mtag + 1);
		if (tdbi->proto == sav->sah->saidx.proto &&
		    tdbi->spi == sav->spi &&
		    !bcmp(&tdbi->dst, &sav->sah->saidx.dst,
			sizeof(union sockaddr_union)))
			break;
	}

	/* Get crypto descriptors */
	crp = crypto_getreq(esph && espx ? 2 : 1);
	if (crp == NULL) {
		DPRINTF(("%s: failed to acquire crypto descriptors\n",
		    __func__));
		ESPSTAT_INC(esps_crypto);
		m_freem(m);
		return ENOBUFS;
	}

	/* Get IPsec-specific opaque pointer */
	if (esph == NULL || mtag != NULL)
		tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto),
		    M_XDATA, M_NOWAIT|M_ZERO);
	else
		tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto) + alen,
		    M_XDATA, M_NOWAIT|M_ZERO);
	if (tc == NULL) {
		crypto_freereq(crp);
		DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__));
		ESPSTAT_INC(esps_crypto);
		m_freem(m);
		return ENOBUFS;
	}

	tc->tc_ptr = (caddr_t) mtag;

	if (esph) {
		struct cryptodesc *crda = crp->crp_desc;

		IPSEC_ASSERT(crda != NULL, ("null ah crypto descriptor"));

		/* Authentication descriptor */
		crda->crd_skip = skip;
		crda->crd_len = m->m_pkthdr.len - (skip + alen);
		crda->crd_inject = m->m_pkthdr.len - alen;

		crda->crd_alg = esph->type;
		crda->crd_key = sav->key_auth->key_data;
		crda->crd_klen = _KEYBITS(sav->key_auth);

		/* Copy the authenticator */
		if (mtag == NULL)
			m_copydata(m, m->m_pkthdr.len - alen, alen,
			    (caddr_t) (tc + 1));

		/* Chain authentication request */
		crde = crda->crd_next;
	} else {
		crde = crp->crp_desc;
	}

	/* Crypto operation descriptor */
	crp->crp_ilen = m->m_pkthdr.len; /* Total input length */
	crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
	crp->crp_buf = (caddr_t) m;
	crp->crp_callback = esp_input_cb;
	crp->crp_sid = sav->tdb_cryptoid;
	crp->crp_opaque = (caddr_t) tc;

	/* These are passed as-is to the callback */
	tc->tc_spi = sav->spi;
	tc->tc_dst = sav->sah->saidx.dst;
	tc->tc_proto = sav->sah->saidx.proto;
	tc->tc_protoff = protoff;
	tc->tc_skip = skip;
	KEY_ADDREFSA(sav);
	tc->tc_sav = sav;

	/* Decryption descriptor */
	if (espx) {
		IPSEC_ASSERT(crde != NULL, ("null esp crypto descriptor"));
		crde->crd_skip = skip + hlen;
		crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen);
		crde->crd_inject = skip + hlen - sav->ivlen;

		crde->crd_alg = espx->type;
		crde->crd_key = sav->key_enc->key_data;
		crde->crd_klen = _KEYBITS(sav->key_enc);
		/* XXX Rounds ? */
	}

	if (mtag == NULL)
		return crypto_dispatch(crp);
	else
		return esp_input_cb(crp);
}
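/*
 * esp_input() and esp_input_cb() communicate through crp_opaque, which
 * carries a struct tdb_crypto.  The definition is not reproduced in this
 * excerpt; the sketch below lists only the fields the code above and the
 * callbacks below actually touch.  Field names match the code, but the
 * exact types and ordering are assumptions.
 */
struct tdb_crypto {
	struct ipsecrequest	*tc_isr;	/* Output path only. */
	u_int32_t		 tc_spi;	/* SA identity for re-lookup. */
	union sockaddr_union	 tc_dst;
	u_int8_t		 tc_proto;
	int			 tc_protoff;	/* Offset of protocol field. */
	int			 tc_skip;	/* Offset of ESP header. */
	caddr_t			 tc_ptr;	/* m_tag from an IPsec-aware NIC. */
	struct secasvar		*tc_sav;	/* Referenced SA (where used). */
	/* A copy of the authenticator may follow the structure (tc + 1). */
};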
/* * ESP output routine, called by ipsp_process_packet(). */ int esp_output(struct mbuf *m, struct tdb *tdb, struct mbuf **mp, int skip, int protoff) { struct enc_xform *espx = (struct enc_xform *) tdb->tdb_encalgxform; struct auth_hash *esph = (struct auth_hash *) tdb->tdb_authalgxform; int ilen, hlen, rlen, padding, blks, alen; struct mbuf *mi, *mo = (struct mbuf *) NULL; struct tdb_crypto *tc; unsigned char *pad; u_int8_t prot; struct cryptodesc *crde = NULL, *crda = NULL; struct cryptop *crp; #if NBPFILTER > 0 struct ifnet *ifn = &(encif[0].sc_if); ifn->if_opackets++; ifn->if_obytes += m->m_pkthdr.len; if (ifn->if_bpf) { struct enchdr hdr; bzero (&hdr, sizeof(hdr)); hdr.af = tdb->tdb_dst.sa.sa_family; hdr.spi = tdb->tdb_spi; if (espx) hdr.flags |= M_CONF; if (esph) hdr.flags |= M_AUTH; bpf_mtap_hdr(ifn->if_bpf, (char *)&hdr, ENC_HDRLEN, m, BPF_DIRECTION_OUT); } #endif if (tdb->tdb_flags & TDBF_NOREPLAY) hlen = sizeof(u_int32_t) + tdb->tdb_ivlen; else hlen = 2 * sizeof(u_int32_t) + tdb->tdb_ivlen; rlen = m->m_pkthdr.len - skip; /* Raw payload length. */ if (espx) blks = espx->blocksize; else blks = 4; /* If no encryption, we have to be 4-byte aligned. */ padding = ((blks - ((rlen + 2) % blks)) % blks) + 2; if (esph) alen = AH_HMAC_HASHLEN; else alen = 0; espstat.esps_output++; switch (tdb->tdb_dst.sa.sa_family) { #ifdef INET case AF_INET: /* Check for IP maximum packet size violations. */ if (skip + hlen + rlen + padding + alen > IP_MAXPACKET) { DPRINTF(("esp_output(): packet in SA %s/%08x got " "too big\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); espstat.esps_toobig++; return EMSGSIZE; } break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Check for IPv6 maximum packet size violations. */ if (skip + hlen + rlen + padding + alen > IPV6_MAXPACKET) { DPRINTF(("esp_output(): packet in SA %s/%08x got too " "big\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); espstat.esps_toobig++; return EMSGSIZE; } break; #endif /* INET6 */ default: DPRINTF(("esp_output(): unknown/unsupported protocol " "family %d, SA %s/%08x\n", tdb->tdb_dst.sa.sa_family , ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); espstat.esps_nopf++; return EPFNOSUPPORT; } /* Update the counters. */ tdb->tdb_cur_bytes += m->m_pkthdr.len - skip; espstat.esps_obytes += m->m_pkthdr.len - skip; /* Hard byte expiration. */ if (tdb->tdb_flags & TDBF_BYTES && tdb->tdb_cur_bytes >= tdb->tdb_exp_bytes) { pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_HARD); tdb_delete(tdb); m_freem(m); return EINVAL; } /* Soft byte expiration. */ if (tdb->tdb_flags & TDBF_SOFT_BYTES && tdb->tdb_cur_bytes >= tdb->tdb_soft_bytes) { pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_SOFT); tdb->tdb_flags &= ~TDBF_SOFT_BYTES; /* Turn off checking. */ } /* * Loop through mbuf chain; if we find a readonly mbuf, * replace the rest of the chain. */ mo = NULL; mi = m; while (mi != NULL && !M_READONLY(mi)) { mo = mi; mi = mi->m_next; } if (mi != NULL) { /* Replace the rest of the mbuf chain. */ struct mbuf *n = m_copym2(mi, 0, M_COPYALL, M_DONTWAIT); if (n == NULL) { DPRINTF(("esp_output(): bad mbuf chain, SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_hdrops++; m_freem(m); return ENOBUFS; } if (mo != NULL) mo->m_next = n; else m = n; m_freem(mi); } /* Inject ESP header. 
*/ mo = m_inject(m, skip, hlen, M_DONTWAIT); if (mo == NULL) { DPRINTF(("esp_output(): failed to inject ESP header for " "SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); espstat.esps_hdrops++; return ENOBUFS; } /* Initialize ESP header. */ bcopy((caddr_t) &tdb->tdb_spi, mtod(mo, caddr_t), sizeof(u_int32_t)); if (!(tdb->tdb_flags & TDBF_NOREPLAY)) { u_int32_t replay = htonl(tdb->tdb_rpl++); bcopy((caddr_t) &replay, mtod(mo, caddr_t) + sizeof(u_int32_t), sizeof(u_int32_t)); #if NPFSYNC > 0 pfsync_update_tdb(tdb,1); #endif } /* * Add padding -- better to do it ourselves than use the crypto engine, * although if/when we support compression, we'd have to do that. */ mo = m_inject(m, m->m_pkthdr.len, padding + alen, M_DONTWAIT); if (mo == NULL) { DPRINTF(("esp_output(): m_inject failed for SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); return ENOBUFS; } pad = mtod(mo, u_char *); /* Self-describing or random padding ? */ if (!(tdb->tdb_flags & TDBF_RANDOMPADDING)) for (ilen = 0; ilen < padding - 2; ilen++) pad[ilen] = ilen + 1; else arc4random_buf((void *) pad, padding - 2); /* Fix padding length and Next Protocol in padding itself. */ pad[padding - 2] = padding - 2; m_copydata(m, protoff, sizeof(u_int8_t), pad + padding - 1); /* Fix Next Protocol in IPv4/IPv6 header. */ prot = IPPROTO_ESP; m_copyback(m, protoff, sizeof(u_int8_t), &prot); /* Get crypto descriptors. */ crp = crypto_getreq(esph && espx ? 2 : 1); if (crp == NULL) { m_freem(m); DPRINTF(("esp_output(): failed to acquire crypto " "descriptors\n")); espstat.esps_crypto++; return ENOBUFS; } if (espx) { crde = crp->crp_desc; crda = crde->crd_next; /* Encryption descriptor. */ crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_flags = CRD_F_ENCRYPT; crde->crd_inject = skip + hlen - tdb->tdb_ivlen; if (tdb->tdb_flags & TDBF_HALFIV) { /* Copy half-iv in the packet. */ m_copyback(m, crde->crd_inject, tdb->tdb_ivlen, tdb->tdb_iv); /* Cook half-iv. */ bcopy(tdb->tdb_iv, crde->crd_iv, tdb->tdb_ivlen); for (ilen = 0; ilen < tdb->tdb_ivlen; ilen++) crde->crd_iv[tdb->tdb_ivlen + ilen] = ~crde->crd_iv[ilen]; crde->crd_flags |= CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT; } /* Encryption operation. */ crde->crd_alg = espx->type; crde->crd_key = tdb->tdb_emxkey; crde->crd_klen = tdb->tdb_emxkeylen * 8; /* XXX Rounds ? */ } else crda = crp->crp_desc; /* IPsec-specific opaque crypto info. */ tc = malloc(sizeof(*tc), M_XDATA, M_NOWAIT | M_ZERO); if (tc == NULL) { m_freem(m); crypto_freereq(crp); DPRINTF(("esp_output(): failed to allocate tdb_crypto\n")); espstat.esps_crypto++; return ENOBUFS; } tc->tc_spi = tdb->tdb_spi; tc->tc_proto = tdb->tdb_sproto; bcopy(&tdb->tdb_dst, &tc->tc_dst, sizeof(union sockaddr_union)); /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags = CRYPTO_F_IMBUF; crp->crp_buf = (caddr_t) m; crp->crp_callback = (int (*) (struct cryptop *)) esp_output_cb; crp->crp_opaque = (caddr_t) tc; crp->crp_sid = tdb->tdb_cryptoid; if (esph) { /* Authentication descriptor. */ crda->crd_skip = skip; crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; /* Authentication operation. */ crda->crd_alg = esph->type; crda->crd_key = tdb->tdb_amxkey; crda->crd_klen = tdb->tdb_amxkeylen * 8; } if ((tdb->tdb_flags & TDBF_SKIPCRYPTO) == 0) return crypto_dispatch(crp); else return esp_output_cb(crp); }
/* * ESP input callback, called directly by the crypto driver. */ int esp_input_cb(void *op) { u_int8_t lastthree[3], aalg[AH_HMAC_HASHLEN]; int s, hlen, roff, skip, protoff, error; struct mbuf *m1, *mo, *m; struct auth_hash *esph; struct tdb_crypto *tc; struct cryptop *crp; struct m_tag *mtag; struct tdb *tdb; u_int32_t btsx; caddr_t ptr; crp = (struct cryptop *) op; tc = (struct tdb_crypto *) crp->crp_opaque; skip = tc->tc_skip; protoff = tc->tc_protoff; mtag = (struct m_tag *) tc->tc_ptr; m = (struct mbuf *) crp->crp_buf; if (m == NULL) { /* Shouldn't happen... */ free(tc, M_XDATA); crypto_freereq(crp); espstat.esps_crypto++; DPRINTF(("esp_input_cb(): bogus returned buffer from crypto\n")); return (EINVAL); } s = spltdb(); tdb = gettdb(tc->tc_spi, &tc->tc_dst, tc->tc_proto); if (tdb == NULL) { free(tc, M_XDATA); espstat.esps_notdb++; DPRINTF(("esp_input_cb(): TDB is expired while in crypto")); error = EPERM; goto baddone; } esph = (struct auth_hash *) tdb->tdb_authalgxform; /* Check for crypto errors */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ if (tdb->tdb_cryptoid != 0) tdb->tdb_cryptoid = crp->crp_sid; splx(s); return crypto_dispatch(crp); } free(tc, M_XDATA); espstat.esps_noxform++; DPRINTF(("esp_input_cb(): crypto error %d\n", crp->crp_etype)); error = crp->crp_etype; goto baddone; } /* If authentication was performed, check now. */ if (esph != NULL) { /* * If we have a tag, it means an IPsec-aware NIC did the verification * for us. */ if (mtag == NULL) { /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - esph->authsize, esph->authsize, aalg); ptr = (caddr_t) (tc + 1); /* Verify authenticator */ if (bcmp(ptr, aalg, esph->authsize)) { free(tc, M_XDATA); DPRINTF(("esp_input_cb(): authentication failed for packet in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_badauth++; error = EACCES; goto baddone; } } /* Remove trailing authenticator */ m_adj(m, -(esph->authsize)); } free(tc, M_XDATA); /* Replay window checking, if appropriate */ if ((tdb->tdb_wnd > 0) && (!(tdb->tdb_flags & TDBF_NOREPLAY))) { m_copydata(m, skip + sizeof(u_int32_t), sizeof(u_int32_t), (unsigned char *) &btsx); btsx = ntohl(btsx); switch (checkreplaywindow32(btsx, 0, &(tdb->tdb_rpl), tdb->tdb_wnd, &(tdb->tdb_bitmap), 1)) { case 0: /* All's well */ #if NPFSYNC > 0 pfsync_update_tdb(tdb,0); #endif break; case 1: DPRINTF(("esp_input_cb(): replay counter wrapped for SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_wrap++; error = EACCES; goto baddone; case 2: case 3: DPRINTF(("esp_input_cb(): duplicate packet received in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); error = EACCES; goto baddone; default: DPRINTF(("esp_input_cb(): bogus value from checkreplaywindow32() in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_replay++; error = EACCES; goto baddone; } } /* Release the crypto descriptors */ crypto_freereq(crp); /* Determine the ESP header length */ if (tdb->tdb_flags & TDBF_NOREPLAY) hlen = sizeof(u_int32_t) + tdb->tdb_ivlen; /* "old" ESP */ else hlen = 2 * sizeof(u_int32_t) + tdb->tdb_ivlen; /* "new" ESP */ /* Find beginning of ESP header */ m1 = m_getptr(m, skip, &roff); if (m1 == NULL) { espstat.esps_hdrops++; splx(s); DPRINTF(("esp_input_cb(): bad mbuf chain, SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); return EINVAL; } /* Remove the ESP header and IV from the mbuf. 
*/ if (roff == 0) { /* The ESP header was conveniently at the beginning of the mbuf */ m_adj(m1, hlen); if (!(m1->m_flags & M_PKTHDR)) m->m_pkthdr.len -= hlen; } else if (roff + hlen >= m1->m_len) { /* * Part or all of the ESP header is at the end of this mbuf, so * first let's remove the remainder of the ESP header from the * beginning of the remainder of the mbuf chain, if any. */ if (roff + hlen > m1->m_len) { /* Adjust the next mbuf by the remainder */ m_adj(m1->m_next, roff + hlen - m1->m_len); /* The second mbuf is guaranteed not to have a pkthdr... */ m->m_pkthdr.len -= (roff + hlen - m1->m_len); } /* Now, let's unlink the mbuf chain for a second...*/ mo = m1->m_next; m1->m_next = NULL; /* ...and trim the end of the first part of the chain...sick */ m_adj(m1, -(m1->m_len - roff)); if (!(m1->m_flags & M_PKTHDR)) m->m_pkthdr.len -= (m1->m_len - roff); /* Finally, let's relink */ m1->m_next = mo; } else { /* * The ESP header lies in the "middle" of the mbuf...do an * overlapping copy of the remainder of the mbuf over the ESP * header. */ bcopy(mtod(m1, u_char *) + roff + hlen, mtod(m1, u_char *) + roff, m1->m_len - (roff + hlen)); m1->m_len -= hlen; m->m_pkthdr.len -= hlen; } /* Save the last three bytes of decrypted data */ m_copydata(m, m->m_pkthdr.len - 3, 3, lastthree); /* Verify pad length */ if (lastthree[1] + 2 > m->m_pkthdr.len - skip) { espstat.esps_badilen++; splx(s); DPRINTF(("esp_input_cb(): invalid padding length %d for packet in SA %s/%08x\n", lastthree[1], ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); return EINVAL; } /* Verify correct decryption by checking the last padding bytes */ if (!(tdb->tdb_flags & TDBF_RANDOMPADDING)) { if ((lastthree[1] != lastthree[0]) && (lastthree[1] != 0)) { espstat.esps_badenc++; splx(s); DPRINTF(("esp_input(): decryption failed for packet in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); return EINVAL; } } /* Trim the mbuf chain to remove the trailing authenticator and padding */ m_adj(m, -(lastthree[1] + 2)); /* Restore the Next Protocol field */ m_copyback(m, protoff, sizeof(u_int8_t), lastthree + 2); /* Back to generic IPsec input processing */ error = ipsec_common_input_cb(m, tdb, skip, protoff, mtag); splx(s); return (error); baddone: splx(s); if (m != NULL) m_freem(m); crypto_freereq(crp); return (error); }
/* * ESP input processing, called (eventually) through the protocol switch. */ static int esp_input(struct mbuf *m, const struct secasvar *sav, int skip, int protoff) { const struct auth_hash *esph; const struct enc_xform *espx; struct tdb_ident *tdbi; struct tdb_crypto *tc; int plen, alen, hlen, error; struct m_tag *mtag; struct newesp *esp; struct cryptodesc *crde; struct cryptop *crp; IPSEC_SPLASSERT_SOFTNET("esp_input"); IPSEC_ASSERT(sav != NULL, ("esp_input: null SA")); IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("esp_input: null encoding xform")); IPSEC_ASSERT((skip&3) == 0 && (m->m_pkthdr.len&3) == 0, ("esp_input: misaligned packet, skip %u pkt len %u", skip, m->m_pkthdr.len)); /* XXX don't pullup, just copy header */ IP6_EXTHDR_GET(esp, struct newesp *, m, skip, sizeof (struct newesp)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Determine the ESP header length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; /* Authenticator hash size */ alen = esph ? esph->authsize : 0; /* * Verify payload length is multiple of encryption algorithm * block size. * * NB: This works for the null algorithm because the blocksize * is 4 and all packets must be 4-byte aligned regardless * of the algorithm. */ plen = m->m_pkthdr.len - (skip + hlen + alen); if ((plen & (espx->blocksize - 1)) || (plen <= 0)) { DPRINTF(("esp_input: " "payload of %d octets not a multiple of %d octets," " SA %s/%08lx\n", plen, espx->blocksize, ipsec_address(&sav->sah->saidx.dst), (u_long) ntohl(sav->spi))); ESP_STATINC(ESP_STAT_BADILEN); m_freem(m); return EINVAL; } /* * Check sequence number. */ if (esph && sav->replay && !ipsec_chkreplay(ntohl(esp->esp_seq), sav)) { DPRINTF(("esp_input: packet replay check for %s\n", ipsec_logsastr(sav))); /*XXX*/ ESP_STATINC(ESP_STAT_REPLAY); m_freem(m); return ENOBUFS; /*XXX*/ } /* Update the counters */ ESP_STATADD(ESP_STAT_IBYTES, m->m_pkthdr.len - skip - hlen - alen); /* Find out if we've already done crypto */ for (mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, NULL); mtag != NULL; mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, mtag)) { tdbi = (struct tdb_ident *) (mtag + 1); if (tdbi->proto == sav->sah->saidx.proto && tdbi->spi == sav->spi && !memcmp(&tdbi->dst, &sav->sah->saidx.dst, sizeof(union sockaddr_union))) break; } /* Get crypto descriptors */ crp = crypto_getreq(esph && espx ? 
2 : 1); if (crp == NULL) { DPRINTF(("esp_input: failed to acquire crypto descriptors\n")); ESP_STATINC(ESP_STAT_CRYPTO); m_freem(m); return ENOBUFS; } /* Get IPsec-specific opaque pointer */ if (esph == NULL || mtag != NULL) tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto), M_XDATA, M_NOWAIT|M_ZERO); else tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto) + alen, M_XDATA, M_NOWAIT|M_ZERO); if (tc == NULL) { crypto_freereq(crp); DPRINTF(("esp_input: failed to allocate tdb_crypto\n")); ESP_STATINC(ESP_STAT_CRYPTO); m_freem(m); return ENOBUFS; } error = m_makewritable(&m, 0, m->m_pkthdr.len, M_NOWAIT); if (error) { m_freem(m); free(tc, M_XDATA); crypto_freereq(crp); DPRINTF(("esp_input: m_makewritable failed\n")); ESP_STATINC(ESP_STAT_CRYPTO); return error; } tc->tc_ptr = mtag; if (esph) { struct cryptodesc *crda = crp->crp_desc; IPSEC_ASSERT(crda != NULL, ("esp_input: null ah crypto descriptor")); /* Authentication descriptor */ crda->crd_skip = skip; if (espx && espx->type == CRYPTO_AES_GCM_16) crda->crd_len = hlen - sav->ivlen; else crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; crda->crd_alg = esph->type; if (espx && (espx->type == CRYPTO_AES_GCM_16 || espx->type == CRYPTO_AES_GMAC)) { crda->crd_key = _KEYBUF(sav->key_enc); crda->crd_klen = _KEYBITS(sav->key_enc); } else { crda->crd_key = _KEYBUF(sav->key_auth); crda->crd_klen = _KEYBITS(sav->key_auth); } /* Copy the authenticator */ if (mtag == NULL) m_copydata(m, m->m_pkthdr.len - alen, alen, (tc + 1)); /* Chain authentication request */ crde = crda->crd_next; } else { crde = crp->crp_desc; } /* Crypto operation descriptor */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_IMBUF; crp->crp_buf = m; crp->crp_callback = esp_input_cb; crp->crp_sid = sav->tdb_cryptoid; crp->crp_opaque = tc; /* These are passed as-is to the callback */ tc->tc_spi = sav->spi; tc->tc_dst = sav->sah->saidx.dst; tc->tc_proto = sav->sah->saidx.proto; tc->tc_protoff = protoff; tc->tc_skip = skip; /* Decryption descriptor */ if (espx) { IPSEC_ASSERT(crde != NULL, ("esp_input: null esp crypto descriptor")); crde->crd_skip = skip + hlen; if (espx->type == CRYPTO_AES_GMAC) crde->crd_len = 0; else crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_inject = skip + hlen - sav->ivlen; crde->crd_alg = espx->type; crde->crd_key = _KEYBUF(sav->key_enc); crde->crd_klen = _KEYBITS(sav->key_enc); /* XXX Rounds ? */ } if (mtag == NULL) return crypto_dispatch(crp); else return esp_input_cb(crp); }
/* * ESP input callback from the crypto driver. */ static int esp_input_cb(struct cryptop *crp) { char buf[128]; u_int8_t lastthree[3], aalg[AH_HMAC_MAXHASHLEN]; int hlen, skip, protoff, error, alen; struct mbuf *m; struct cryptodesc *crd; struct auth_hash *esph; struct enc_xform *espx; struct tdb_crypto *tc; struct secasvar *sav; struct secasindex *saidx; caddr_t ptr; crd = crp->crp_desc; IPSEC_ASSERT(crd != NULL, ("null crypto descriptor!")); tc = (struct tdb_crypto *) crp->crp_opaque; IPSEC_ASSERT(tc != NULL, ("null opaque crypto data area!")); skip = tc->tc_skip; protoff = tc->tc_protoff; m = (struct mbuf *) crp->crp_buf; sav = tc->tc_sav; IPSEC_ASSERT(sav != NULL, ("null SA!")); saidx = &sav->sah->saidx; IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("unexpected protocol family %u", saidx->dst.sa.sa_family)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Check for crypto errors */ if (crp->crp_etype) { /* Reset the session ID */ if (sav->tdb_cryptoid != 0) sav->tdb_cryptoid = crp->crp_sid; if (crp->crp_etype == EAGAIN) return (crypto_dispatch(crp)); ESPSTAT_INC(esps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { ESPSTAT_INC(esps_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } ESPSTAT_INC(esps_hist[sav->alg_enc]); /* If authentication was performed, check now. */ if (esph != NULL) { alen = xform_ah_authsize(esph); AHSTAT_INC(ahs_hist[sav->alg_auth]); /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - alen, alen, aalg); ptr = (caddr_t) (tc + 1); /* Verify authenticator */ if (timingsafe_bcmp(ptr, aalg, alen) != 0) { DPRINTF(("%s: authentication hash mismatch for " "packet in SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_badauth); error = EACCES; goto bad; } /* Remove trailing authenticator */ m_adj(m, -alen); } /* Release the crypto descriptors */ free(tc, M_XDATA), tc = NULL; crypto_freereq(crp), crp = NULL; /* * Packet is now decrypted. */ m->m_flags |= M_DECRYPTED; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newesp, esp_seq), sizeof (seq), (caddr_t) &seq); if (ipsec_updatereplay(ntohl(seq), sav)) { DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_logsastr(sav, buf, sizeof(buf)))); ESPSTAT_INC(esps_replay); error = ENOBUFS; goto bad; } }
static int ipsec_ocf_xmit_cb(struct cryptop *crp) { struct ipsec_xmit_state *ixs = (struct ipsec_xmit_state *)crp->crp_opaque; struct iphdr *newiph; struct ipcomphdr *cmph; unsigned orig_len, comp_len; struct cryptodesc *crdc = NULL; KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb\n"); if (ixs == NULL) { KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: " "NULL ixs in callback\n"); return 0; } /* * we must update the state before returning to the state machine. * if we have an error, terminate the processing by moving to the DONE * state */ ixs->state = IPSEC_XSM_DONE; /* assume bad xmit */ if (crp->crp_etype) { ptrdiff_t ptr_delta; if (crp->crp_etype == EAGAIN) { /* Session has been migrated. Store the new session id and retry */ KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: crypto session migrated\n"); ixs->ipsp->ocf_cryptoid = crp->crp_sid; /* resubmit request */ if (crypto_dispatch(crp) == 0) return 0; /* resubmit failed */ } KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: " "error in processing 0x%x\n", crp->crp_etype); switch (ixs->ipsp->ips_said.proto) { case IPPROTO_COMP: /* * It's ok for compression to fail... we made a clone * of the packet, so we just revert it now... */ if (!ixs->pre_ipcomp_skb) { KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: " "IPcomp on %u bytes failed, " "but we have no clone!\n", (unsigned int) (lsw_ip_hdr_version(ixs) == 6 ? (ntohs(lsw_ip6_hdr(ixs)-> payload_len) + sizeof(struct ipv6hdr)) : ntohs(lsw_ip4_hdr( ixs)->tot_len)) - ixs->iphlen); /* this is a fail. */ break; } KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: " "IPcomp on %u bytes failed, " "using backup clone.\n", (unsigned int) (lsw_ip_hdr_version(ixs) == 6 ? (ntohs(lsw_ip6_hdr(ixs)->payload_len) + sizeof(struct ipv6hdr)) : ntohs(lsw_ip4_hdr(ixs)->tot_len)) - ixs->iphlen); ptr_delta = ixs->pre_ipcomp_skb->data - ixs->skb->data; ixs->iph = (void*)((char*)ixs->iph + ptr_delta); /* * can not free it here, because we are under * IRQ, potentially, so queue it for later */ kfree_skb(ixs->skb); ixs->skb = ixs->pre_ipcomp_skb; ixs->pre_ipcomp_skb = NULL; skb_set_network_header(ixs->skb, ipsec_skb_offset(ixs->skb, (( void *) skb_network_header( ixs-> skb)) + ptr_delta)); skb_set_transport_header(ixs->skb, ipsec_skb_offset(ixs->skb, (( void *) skb_transport_header( ixs -> skb)) + ptr_delta)); KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->iph); /* this means we don't compress */ ixs->state = IPSEC_XSM_CONT; break; } goto bail; } switch (ixs->ipsp->ips_said.proto) { case IPPROTO_ESP: /* ESP, nothing to do */ break; case IPPROTO_AH: /* AH post processing, put back fields we had to zero */ if (lsw_ip_hdr_version(ixs) == 4) { lsw_ip4_hdr(ixs)->ttl = ixs->ttl; lsw_ip4_hdr(ixs)->check = ixs->check; lsw_ip4_hdr(ixs)->frag_off = ixs->frag_off; lsw_ip4_hdr(ixs)->tos = ixs->tos; } break; case IPPROTO_COMP: /* IPcomp fill in the header */ crdc = crp->crp_desc; KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: " "after <%s%s%s>, SA:%s:\n", IPS_XFORM_NAME(ixs->ipsp), ixs->sa_len ? ixs->sa_txt : " (error)"); KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->iph); orig_len = (lsw_ip_hdr_version(ixs) == 6 ? 
(ntohs(lsw_ip6_hdr(ixs)->payload_len) + sizeof(struct ipv6hdr)) : ntohs(lsw_ip4_hdr(ixs)->tot_len)) - ixs->iphlen; comp_len = crp->crp_olen; if (sysctl_ipsec_debug_ipcomp && sysctl_ipsec_debug_verbose) { ipsec_dmp_block("compress after", ((unsigned char*)ixs->iph) + ixs->iphlen, comp_len); } newiph = (struct iphdr *)((char*)ixs->iph - sizeof(struct ipcomphdr)); cmph = (struct ipcomphdr *)((char*)newiph + ixs->iphlen); /* move the ip header to make room for the new ipcomp header */ memmove(((unsigned char *) ixs->skb->data) - sizeof(struct ipcomphdr), ixs->skb->data, (((unsigned char *) ixs->iph) + ixs->iphlen) - ((unsigned char *) ixs->skb->data)); /* DAVIDM check for head room */ skb_push(ixs->skb, sizeof(struct ipcomphdr)); ixs->iph = newiph; skb_set_network_header(ixs->skb, ipsec_skb_offset(ixs->skb, newiph)); skb_set_transport_header(ixs->skb, ipsec_skb_offset(ixs->skb, newiph) + ixs->iphlen); /* now we can fill in the ipcomp header */ cmph->ipcomp_nh = ixs->next_header; cmph->ipcomp_flags = 0; cmph->ipcomp_cpi = htons((__u16)(ntohl(ixs->ipsp->ips_said.spi) & 0x0000ffff)); /* update the ip header to reflect the compression */ if (lsw_ip_hdr_version(ixs) == 6) { lsw_ip6_hdr(ixs)->nexthdr = IPPROTO_COMP; lsw_ip6_hdr(ixs)->payload_len = htons(ixs->iphlen + sizeof(struct ipcomphdr) + comp_len - sizeof(struct ipv6hdr)); } else { lsw_ip4_hdr(ixs)->protocol = IPPROTO_COMP; lsw_ip4_hdr(ixs)->tot_len = htons(ixs->iphlen + sizeof(struct ipcomphdr) + comp_len); lsw_ip4_hdr(ixs)->check = 0; lsw_ip4_hdr(ixs)->check = ip_fast_csum((char *) ixs->iph, lsw_ip4_hdr( ixs)->ihl); } /* Update skb length/tail by "unputting" the shrinkage */ safe_skb_put(ixs->skb, comp_len - orig_len); ixs->ipsp->ips_comp_adapt_skip = 0; ixs->ipsp->ips_comp_adapt_tries = 0; /* release the backup copy */ if (ixs->pre_ipcomp_skb) { kfree_skb(ixs->pre_ipcomp_skb); ixs->pre_ipcomp_skb = NULL; } KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: " "after <%s%s%s>, SA:%s:\n", IPS_XFORM_NAME(ixs->ipsp), ixs->sa_len ? ixs->sa_txt : " (error)"); KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->iph); break; } /* all good */ ixs->state = IPSEC_XSM_CONT; bail: crypto_freereq(crp); crp = NULL; ipsec_ocf_queue_task(ipsec_xsm, ixs); return 0; }
/*
 * This is the main function responsible for cryptography (ie. communication
 * with crypto(9) subsystem).
 *
 * BIO_READ:
 *	g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done ->
 *	G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done ->
 *	g_io_request -> g_eli_write_done -> g_io_deliver
 */
void
g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp)
{
	struct g_eli_softc *sc;
	struct cryptop *crp;
	struct cryptodesc *crde, *crda;
	struct uio *uio;
	struct iovec *iov;
	u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
	off_t dstoff;
	int err, error;
	u_char *p, *data, *auth, *authkey, *plaindata;

	G_ELI_LOGREQ(3, bp, "%s", __func__);

	bp->bio_pflags = wr->w_number;
	sc = wr->w_softc;
	/* Sectorsize of decrypted provider eg. 4096. */
	decr_secsize = bp->bio_to->sectorsize;
	/* The real sectorsize of encrypted provider, eg. 512. */
	encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
	/* Number of data bytes in one encrypted sector, eg. 480. */
	data_secsize = sc->sc_data_per_sector;
	/* Number of sectors from decrypted provider, eg. 2. */
	nsec = bp->bio_length / decr_secsize;
	/* Number of sectors from encrypted provider, eg. 18. */
	nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
	/* Last sector number in every big sector, eg. 9. */
	lsec = sc->sc_bytes_per_sector / encr_secsize;
	/* Destination offset, used for IV generation. */
	dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;

	auth = NULL;	/* Silence compiler warning. */
	plaindata = bp->bio_data;
	if (bp->bio_cmd == BIO_READ) {
		data = bp->bio_driver2;
		auth = data + encr_secsize * nsec;
		p = auth + sc->sc_alen * nsec;
	} else {
		size_t size;

		size = encr_secsize * nsec;
		size += sizeof(*crp) * nsec;
		size += sizeof(*crde) * nsec;
		size += sizeof(*crda) * nsec;
		size += G_ELI_AUTH_SECKEYLEN * nsec;
		size += sizeof(*uio) * nsec;
		size += sizeof(*iov) * nsec;
		data = malloc(size, M_ELI, M_WAITOK);
		bp->bio_driver2 = data;
		p = data + encr_secsize * nsec;
	}
	bp->bio_inbed = 0;
	bp->bio_children = nsec;

	error = 0;
	for (i = 1; i <= nsec; i++, dstoff += encr_secsize) {
		crp = (struct cryptop *)p;	p += sizeof(*crp);
		crde = (struct cryptodesc *)p;	p += sizeof(*crde);
		crda = (struct cryptodesc *)p;	p += sizeof(*crda);
		authkey = (u_char *)p;		p += G_ELI_AUTH_SECKEYLEN;
		uio = (struct uio *)p;		p += sizeof(*uio);
		iov = (struct iovec *)p;	p += sizeof(*iov);

		data_secsize = sc->sc_data_per_sector;
		if ((i % lsec) == 0)
			data_secsize = decr_secsize % data_secsize;

		if (bp->bio_cmd == BIO_READ) {
			/* Remember read HMAC. */
			bcopy(data, auth, sc->sc_alen);
			auth += sc->sc_alen;
			/* TODO: bzero(9) can be commented out later. */
			bzero(data, sc->sc_alen);
		} else {
			bcopy(plaindata, data + sc->sc_alen, data_secsize);
			plaindata += data_secsize;
		}

		iov->iov_len = sc->sc_alen + data_secsize;
		iov->iov_base = data;
		data += encr_secsize;

		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
		uio->uio_segflg = UIO_SYSSPACE;
		uio->uio_resid = iov->iov_len;

		crp->crp_sid = wr->w_sid;
		crp->crp_ilen = uio->uio_resid;
		crp->crp_olen = data_secsize;
		crp->crp_opaque = (void *)bp;
		crp->crp_buf = (void *)uio;
		crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC | CRYPTO_F_REL;
		if (g_eli_batch)
			crp->crp_flags |= CRYPTO_F_BATCH;
		if (bp->bio_cmd == BIO_WRITE) {
			crp->crp_callback = g_eli_auth_write_done;
			crp->crp_desc = crde;
			crde->crd_next = crda;
			crda->crd_next = NULL;
		} else {
			crp->crp_callback = g_eli_auth_read_done;
			crp->crp_desc = crda;
			crda->crd_next = crde;
			crde->crd_next = NULL;
		}

		crde->crd_skip = sc->sc_alen;
		crde->crd_len = data_secsize;
		crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0)
			crde->crd_flags |= CRD_F_KEY_EXPLICIT;
		if (bp->bio_cmd == BIO_WRITE)
			crde->crd_flags |= CRD_F_ENCRYPT;
		crde->crd_alg = sc->sc_ealgo;
		crde->crd_key = g_eli_key_hold(sc, dstoff, encr_secsize);
		crde->crd_klen = sc->sc_ekeylen;
		if (sc->sc_ealgo == CRYPTO_AES_XTS)
			crde->crd_klen <<= 1;
		g_eli_crypto_ivgen(sc, dstoff, crde->crd_iv,
		    sizeof(crde->crd_iv));

		crda->crd_skip = sc->sc_alen;
		crda->crd_len = data_secsize;
		crda->crd_inject = 0;
		crda->crd_flags = CRD_F_KEY_EXPLICIT;
		crda->crd_alg = sc->sc_aalgo;
		g_eli_auth_keygen(sc, dstoff, authkey);
		crda->crd_key = authkey;
		crda->crd_klen = G_ELI_AUTH_SECKEYLEN * 8;

		crp->crp_etype = 0;
		err = crypto_dispatch(crp);
		if (err != 0 && error == 0)
			error = err;
	}
	if (bp->bio_error == 0)
		bp->bio_error = error;
}
/*
 * ESP output callback from the crypto driver.
 */
static int
esp_output_cb(struct cryptop *crp)
{
	struct tdb_crypto *tc;
	struct ipsecrequest *isr;
	struct secasvar *sav;
	struct mbuf *m;
	int s, err, error;

	tc = (struct tdb_crypto *) crp->crp_opaque;
	KASSERT(tc != NULL, ("esp_output_cb: null opaque data area!"));
	m = (struct mbuf *) crp->crp_buf;

	s = splnet();

	isr = tc->tc_isr;
	sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi);
	if (sav == NULL) {
		espstat.esps_notdb++;
		DPRINTF(("esp_output_cb: SA expired while in crypto "
		    "(SA %s/%08lx proto %u)\n", ipsec_address(&tc->tc_dst),
		    (u_long) ntohl(tc->tc_spi), tc->tc_proto));
		error = ENOBUFS;		/*XXX*/
		goto bad;
	}
	KASSERT(isr->sav == sav,
	    ("esp_output_cb: SA changed was %p now %p\n", isr->sav, sav));

	/* Check for crypto errors. */
	if (crp->crp_etype) {
		/* Reset session ID. */
		if (sav->tdb_cryptoid != 0)
			sav->tdb_cryptoid = crp->crp_sid;

		if (crp->crp_etype == EAGAIN) {
			KEY_FREESAV(&sav);
			splx(s);
			return crypto_dispatch(crp);
		}

		espstat.esps_noxform++;
		DPRINTF(("esp_output_cb: crypto error %d\n", crp->crp_etype));
		error = crp->crp_etype;
		goto bad;
	}

	/* Shouldn't happen... */
	if (m == NULL) {
		espstat.esps_crypto++;
		DPRINTF(("esp_output_cb: bogus returned buffer from crypto\n"));
		error = EINVAL;
		goto bad;
	}
	espstat.esps_hist[sav->alg_enc]++;
	if (sav->tdb_authalgxform != NULL)
		ahstat.ahs_hist[sav->alg_auth]++;

	/* Release crypto descriptors. */
	free(tc, M_XDATA);
	crypto_freereq(crp);

	/* NB: m is reclaimed by ipsec_process_done. */
	err = ipsec_process_done(m, isr);
	KEY_FREESAV(&sav);
	splx(s);
	return err;
bad:
	if (sav)
		KEY_FREESAV(&sav);
	splx(s);
	if (m)
		m_freem(m);
	free(tc, M_XDATA);
	crypto_freereq(crp);
	return error;
}
/* * ESP output routine, called by ipsec[46]_process_packet(). */ static int esp_output( struct mbuf *m, struct ipsecrequest *isr, struct mbuf **mp, int skip, int protoff ) { struct enc_xform *espx; struct auth_hash *esph; int hlen, rlen, plen, padding, blks, alen, i, roff; struct mbuf *mo = (struct mbuf *) NULL; struct tdb_crypto *tc; struct secasvar *sav; struct secasindex *saidx; unsigned char *pad; u_int8_t prot; int error, maxpacketsize; struct cryptodesc *crde = NULL, *crda = NULL; struct cryptop *crp; SPLASSERT(net, "esp_output"); sav = isr->sav; KASSERT(sav != NULL, ("esp_output: null SA")); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; KASSERT(espx != NULL, ("esp_output: null encoding xform")); if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; rlen = m->m_pkthdr.len - skip; /* Raw payload length. */ /* * NB: The null encoding transform has a blocksize of 4 * so that headers are properly aligned. */ blks = espx->blocksize; /* IV blocksize */ /* XXX clamp padding length a la KAME??? */ padding = ((blks - ((rlen + 2) % blks)) % blks) + 2; plen = rlen + padding; /* Padded payload length. */ if (esph) alen = AH_HMAC_HASHLEN; else alen = 0; espstat.esps_output++; saidx = &sav->sah->saidx; /* Check for maximum packet size violations. */ switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: maxpacketsize = IP_MAXPACKET; break; #endif /* INET */ #ifdef INET6 case AF_INET6: maxpacketsize = IPV6_MAXPACKET; break; #endif /* INET6 */ default: DPRINTF(("esp_output: unknown/unsupported protocol " "family %d, SA %s/%08lx\n", saidx->dst.sa.sa_family, ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); espstat.esps_nopf++; error = EPFNOSUPPORT; goto bad; } if (skip + hlen + rlen + padding + alen > maxpacketsize) { DPRINTF(("esp_output: packet in SA %s/%08lx got too big " "(len %u, max len %u)\n", ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi), skip + hlen + rlen + padding + alen, maxpacketsize)); espstat.esps_toobig++; error = EMSGSIZE; goto bad; } /* Update the counters. */ espstat.esps_obytes += m->m_pkthdr.len - skip; m = m_clone(m); if (m == NULL) { DPRINTF(("esp_output: cannot clone mbuf chain, SA %s/%08lx\n", ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); espstat.esps_hdrops++; error = ENOBUFS; goto bad; } /* Inject ESP header. */ mo = m_makespace(m, skip, hlen, &roff); if (mo == NULL) { DPRINTF(("esp_output: failed to inject %u byte ESP hdr for SA " "%s/%08lx\n", hlen, ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); espstat.esps_hdrops++; /* XXX diffs from openbsd */ error = ENOBUFS; goto bad; } /* Initialize ESP header. */ bcopy((caddr_t) &sav->spi, mtod(mo, caddr_t) + roff, sizeof(u_int32_t)); if (sav->replay) { u_int32_t replay = htonl(++(sav->replay->count)); bcopy((caddr_t) &replay, mtod(mo, caddr_t) + roff + sizeof(u_int32_t), sizeof(u_int32_t)); } /* * Add padding -- better to do it ourselves than use the crypto engine, * although if/when we support compression, we'd have to do that. */ pad = (u_char *) m_pad(m, padding + alen); if (pad == NULL) { DPRINTF(("esp_output: m_pad failed for SA %s/%08lx\n", ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); m = NULL; /* NB: free'd by m_pad */ error = ENOBUFS; goto bad; } /* * Add padding: random, zero, or self-describing. 
* XXX catch unexpected setting */ switch (sav->flags & SADB_X_EXT_PMASK) { case SADB_X_EXT_PRAND: (void) read_random(pad, padding - 2); break; case SADB_X_EXT_PZERO: bzero(pad, padding - 2); break; case SADB_X_EXT_PSEQ: for (i = 0; i < padding - 2; i++) pad[i] = i+1; break; } /* Fix padding length and Next Protocol in padding itself. */ pad[padding - 2] = padding - 2; m_copydata(m, protoff, sizeof(u_int8_t), pad + padding - 1); /* Fix Next Protocol in IPv4/IPv6 header. */ prot = IPPROTO_ESP; m_copyback(m, protoff, sizeof(u_int8_t), (u_char *) &prot); /* Get crypto descriptors. */ crp = crypto_getreq(esph && espx ? 2 : 1); if (crp == NULL) { DPRINTF(("esp_output: failed to acquire crypto descriptors\n")); espstat.esps_crypto++; error = ENOBUFS; goto bad; } if (espx) { crde = crp->crp_desc; crda = crde->crd_next; /* Encryption descriptor. */ crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_flags = CRD_F_ENCRYPT; crde->crd_inject = skip + hlen - sav->ivlen; /* Encryption operation. */ crde->crd_alg = espx->type; crde->crd_key = _KEYBUF(sav->key_enc); crde->crd_klen = _KEYBITS(sav->key_enc); /* XXX Rounds ? */ } else crda = crp->crp_desc; /* IPsec-specific opaque crypto info. */ tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto), M_XDATA, M_NOWAIT|M_ZERO); if (tc == NULL) { crypto_freereq(crp); DPRINTF(("esp_output: failed to allocate tdb_crypto\n")); espstat.esps_crypto++; error = ENOBUFS; goto bad; } /* Callback parameters */ tc->tc_isr = isr; tc->tc_spi = sav->spi; tc->tc_dst = saidx->dst; tc->tc_proto = saidx->proto; /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags = CRYPTO_F_IMBUF; crp->crp_buf = (caddr_t) m; crp->crp_callback = esp_output_cb; crp->crp_opaque = (caddr_t) tc; crp->crp_sid = sav->tdb_cryptoid; if (esph) { /* Authentication descriptor. */ crda->crd_skip = skip; crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; /* Authentication operation. */ crda->crd_alg = esph->type; crda->crd_key = _KEYBUF(sav->key_auth); crda->crd_klen = _KEYBITS(sav->key_auth); } return crypto_dispatch(crp); bad: if (m) m_freem(m); return (error); }
/* * ESP input callback from the crypto driver. */ static int esp_input_cb(struct cryptop *crp) { u_int8_t lastthree[3], aalg[AH_HMAC_HASHLEN]; int s, hlen, skip, protoff, error; struct mbuf *m; struct cryptodesc *crd; struct auth_hash *esph; struct enc_xform *espx; struct tdb_crypto *tc; struct m_tag *mtag; struct secasvar *sav; struct secasindex *saidx; caddr_t ptr; crd = crp->crp_desc; KASSERT(crd != NULL, ("esp_input_cb: null crypto descriptor!")); tc = (struct tdb_crypto *) crp->crp_opaque; KASSERT(tc != NULL, ("esp_input_cb: null opaque crypto data area!")); skip = tc->tc_skip; protoff = tc->tc_protoff; mtag = (struct m_tag *) tc->tc_ptr; m = (struct mbuf *) crp->crp_buf; s = splnet(); sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi); if (sav == NULL) { espstat.esps_notdb++; DPRINTF(("esp_input_cb: SA expired while in crypto " "(SA %s/%08lx proto %u)\n", ipsec_address(&tc->tc_dst), (u_long) ntohl(tc->tc_spi), tc->tc_proto)); error = ENOBUFS; /*XXX*/ goto bad; } saidx = &sav->sah->saidx; KASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("ah_input_cb: unexpected protocol family %u", saidx->dst.sa.sa_family)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Check for crypto errors */ if (crp->crp_etype) { /* Reset the session ID */ if (sav->tdb_cryptoid != 0) sav->tdb_cryptoid = crp->crp_sid; if (crp->crp_etype == EAGAIN) { KEY_FREESAV(&sav); splx(s); return crypto_dispatch(crp); } espstat.esps_noxform++; DPRINTF(("esp_input_cb: crypto error %d\n", crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { espstat.esps_crypto++; DPRINTF(("esp_input_cb: bogus returned buffer from crypto\n")); error = EINVAL; goto bad; } espstat.esps_hist[sav->alg_enc]++; /* If authentication was performed, check now. */ if (esph != NULL) { /* * If we have a tag, it means an IPsec-aware NIC did * the verification for us. Otherwise we need to * check the authentication calculation. */ ahstat.ahs_hist[sav->alg_auth]++; if (mtag == NULL) { /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - esph->authsize, esph->authsize, aalg); ptr = (caddr_t) (tc + 1); /* Verify authenticator */ if (bcmp(ptr, aalg, esph->authsize) != 0) { DPRINTF(("esp_input_cb: " "authentication hash mismatch for packet in SA %s/%08lx\n", ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); espstat.esps_badauth++; error = EACCES; goto bad; } } /* Remove trailing authenticator */ m_adj(m, -(esph->authsize)); } /* Release the crypto descriptors */ free(tc, M_XDATA), tc = NULL; crypto_freereq(crp), crp = NULL; /* * Packet is now decrypted. */ m->m_flags |= M_DECRYPTED; /* Determine the ESP header length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; /* Remove the ESP header and IV from the mbuf. 
*/ error = m_striphdr(m, skip, hlen); if (error) { espstat.esps_hdrops++; DPRINTF(("esp_input_cb: bad mbuf chain, SA %s/%08lx\n", ipsec_address(&sav->sah->saidx.dst), (u_long) ntohl(sav->spi))); goto bad; } /* Save the last three bytes of decrypted data */ m_copydata(m, m->m_pkthdr.len - 3, 3, lastthree); /* Verify pad length */ if (lastthree[1] + 2 > m->m_pkthdr.len - skip) { espstat.esps_badilen++; DPRINTF(("esp_input_cb: invalid padding length %d " "for %u byte packet in SA %s/%08lx\n", lastthree[1], m->m_pkthdr.len - skip, ipsec_address(&sav->sah->saidx.dst), (u_long) ntohl(sav->spi))); error = EINVAL; goto bad; } /* Verify correct decryption by checking the last padding bytes */ if ((sav->flags & SADB_X_EXT_PMASK) != SADB_X_EXT_PRAND) { if (lastthree[1] != lastthree[0] && lastthree[1] != 0) { espstat.esps_badenc++; DPRINTF(("esp_input_cb: decryption failed " "for packet in SA %s/%08lx\n", ipsec_address(&sav->sah->saidx.dst), (u_long) ntohl(sav->spi))); DPRINTF(("esp_input_cb: %x %x\n", lastthree[0], lastthree[1])); error = EINVAL; goto bad; } } /* Trim the mbuf chain to remove trailing authenticator and padding */ m_adj(m, -(lastthree[1] + 2)); /* Restore the Next Protocol field */ m_copyback(m, protoff, sizeof (u_int8_t), lastthree + 2); IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag); KEY_FREESAV(&sav); splx(s); return error; bad: if (sav) KEY_FREESAV(&sav); splx(s); if (m != NULL) m_freem(m); if (tc != NULL) free(tc, M_XDATA); if (crp != NULL) crypto_freereq(crp); return error; }
/* * ESP input callback from the crypto driver. */ static int esp_input_cb(struct cryptop *crp) { u_int8_t lastthree[3], aalg[AH_HMAC_MAXHASHLEN]; int hlen, skip, protoff, error, alen; struct mbuf *m; struct cryptodesc *crd; struct auth_hash *esph; struct enc_xform *espx; struct tdb_crypto *tc; struct m_tag *mtag; struct secasvar *sav; struct secasindex *saidx; caddr_t ptr; crd = crp->crp_desc; IPSEC_ASSERT(crd != NULL, ("null crypto descriptor!")); tc = (struct tdb_crypto *) crp->crp_opaque; IPSEC_ASSERT(tc != NULL, ("null opaque crypto data area!")); skip = tc->tc_skip; protoff = tc->tc_protoff; mtag = (struct m_tag *) tc->tc_ptr; m = (struct mbuf *) crp->crp_buf; sav = tc->tc_sav; IPSEC_ASSERT(sav != NULL, ("null SA!")); saidx = &sav->sah->saidx; IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("unexpected protocol family %u", saidx->dst.sa.sa_family)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Check for crypto errors */ if (crp->crp_etype) { /* Reset the session ID */ if (sav->tdb_cryptoid != 0) sav->tdb_cryptoid = crp->crp_sid; if (crp->crp_etype == EAGAIN) return (crypto_dispatch(crp)); V_espstat.esps_noxform++; DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { V_espstat.esps_crypto++; DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } V_espstat.esps_hist[sav->alg_enc]++; /* If authentication was performed, check now. */ if (esph != NULL) { switch (esph->type) { case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: alen = esph->hashsize/2; break; default: alen = AH_HMAC_HASHLEN; break; } /* * If we have a tag, it means an IPsec-aware NIC did * the verification for us. Otherwise we need to * check the authentication calculation. */ V_ahstat.ahs_hist[sav->alg_auth]++; if (mtag == NULL) { /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - alen, alen, aalg); ptr = (caddr_t) (tc + 1); /* Verify authenticator */ if (bcmp(ptr, aalg, alen) != 0) { DPRINTF(("%s: " "authentication hash mismatch for packet in SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); V_espstat.esps_badauth++; error = EACCES; goto bad; } } /* Remove trailing authenticator */ m_adj(m, -alen); } /* Release the crypto descriptors */ free(tc, M_XDATA), tc = NULL; crypto_freereq(crp), crp = NULL; /* * Packet is now decrypted. */ m->m_flags |= M_DECRYPTED; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newesp, esp_seq), sizeof (seq), (caddr_t) &seq); if (ipsec_updatereplay(ntohl(seq), sav)) { DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_logsastr(sav))); V_espstat.esps_replay++; error = ENOBUFS; goto bad; } }
/* * ESP input callback from the crypto driver. */ static int esp_input_cb(struct cryptop *crp) { u_int8_t lastthree[3], aalg[AH_ALEN_MAX]; int s, hlen, skip, protoff, error; struct mbuf *m; struct cryptodesc *crd; const struct auth_hash *esph; const struct enc_xform *espx; struct tdb_crypto *tc; struct m_tag *mtag; struct secasvar *sav; struct secasindex *saidx; void *ptr; u_int16_t dport; u_int16_t sport; crd = crp->crp_desc; IPSEC_ASSERT(crd != NULL, ("esp_input_cb: null crypto descriptor!")); tc = (struct tdb_crypto *) crp->crp_opaque; IPSEC_ASSERT(tc != NULL, ("esp_input_cb: null opaque crypto data area!")); skip = tc->tc_skip; protoff = tc->tc_protoff; mtag = (struct m_tag *) tc->tc_ptr; m = (struct mbuf *) crp->crp_buf; /* find the source port for NAT-T */ nat_t_ports_get(m, &dport, &sport); s = splsoftnet(); mutex_enter(softnet_lock); sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi, sport, dport); if (sav == NULL) { ESP_STATINC(ESP_STAT_NOTDB); DPRINTF(("esp_input_cb: SA expired while in crypto " "(SA %s/%08lx proto %u)\n", ipsec_address(&tc->tc_dst), (u_long) ntohl(tc->tc_spi), tc->tc_proto)); error = ENOBUFS; /*XXX*/ goto bad; } saidx = &sav->sah->saidx; IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("esp_input_cb: unexpected protocol family %u", saidx->dst.sa.sa_family)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Check for crypto errors */ if (crp->crp_etype) { /* Reset the session ID */ if (sav->tdb_cryptoid != 0) sav->tdb_cryptoid = crp->crp_sid; if (crp->crp_etype == EAGAIN) { KEY_FREESAV(&sav); mutex_exit(softnet_lock); splx(s); return crypto_dispatch(crp); } ESP_STATINC(ESP_STAT_NOXFORM); DPRINTF(("esp_input_cb: crypto error %d\n", crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { ESP_STATINC(ESP_STAT_CRYPTO); DPRINTF(("esp_input_cb: bogus returned buffer from crypto\n")); error = EINVAL; goto bad; } ESP_STATINC(ESP_STAT_HIST + sav->alg_enc); /* If authentication was performed, check now. */ if (esph != NULL) { /* * If we have a tag, it means an IPsec-aware NIC did * the verification for us. Otherwise we need to * check the authentication calculation. */ AH_STATINC(AH_STAT_HIST + sav->alg_auth); if (mtag == NULL) { /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - esph->authsize, esph->authsize, aalg); ptr = (tc + 1); /* Verify authenticator */ if (!consttime_memequal(ptr, aalg, esph->authsize)) { DPRINTF(("esp_input_cb: " "authentication hash mismatch for packet in SA %s/%08lx\n", ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi))); ESP_STATINC(ESP_STAT_BADAUTH); error = EACCES; goto bad; } } /* Remove trailing authenticator */ m_adj(m, -(esph->authsize)); } /* Release the crypto descriptors */ free(tc, M_XDATA), tc = NULL; crypto_freereq(crp), crp = NULL; /* * Packet is now decrypted. */ m->m_flags |= M_DECRYPTED; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newesp, esp_seq), sizeof (seq), &seq); if (ipsec_updatereplay(ntohl(seq), sav)) { DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_logsastr(sav))); ESP_STATINC(ESP_STAT_REPLAY); error = ENOBUFS; goto bad; } }
static int ipsec_ocf_rcv_cb(struct cryptop *crp) { struct ipsec_rcv_state *irs = (struct ipsec_rcv_state *)crp->crp_opaque; struct iphdr *newiph; unsigned orig_len, decomp_len; struct cryptodesc *crdc = NULL; KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb\n"); if (irs == NULL) { KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb: " "NULL irs in callback\n"); return 0; } /* * we must update the state before returning to the state machine. * if we have an error, terminate the processing by moving to the DONE * state */ irs->state = IPSEC_RSM_DONE; /* assume it went badly */ if (crp->crp_etype) { ptrdiff_t ptr_delta; if (crp->crp_etype == EAGAIN) { /* Session has been migrated. Store the new session id and retry */ KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb: crypto session migrated\n"); irs->ipsp->ocf_cryptoid = crp->crp_sid; /* resubmit request */ if (crypto_dispatch(crp) == 0) return 0; /* resubmit failed */ } KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb: " "error in processing 0x%x\n", crp->crp_etype); switch (irs->ipsp->ips_said.proto) { case IPPROTO_COMP: /* * we restore the previous skb on error and pretend nothing * happened, just no compression */ ptr_delta = irs->pre_ipcomp_skb->data - irs->skb->data; irs->authenticator = (void*)((char*)irs->authenticator + ptr_delta); irs->iph = (void*)((char*)irs->iph + ptr_delta); kfree_skb(irs->skb); irs->skb = irs->pre_ipcomp_skb; irs->pre_ipcomp_skb = NULL; break; } goto bail; } switch (irs->ipsp->ips_said.proto) { case IPPROTO_ESP: /* ESP, process it */ if (ipsec_rcv_esp_post_decrypt(irs) == IPSEC_RCV_OK) { /* this one came up good, set next state */ irs->state = IPSEC_RSM_DECAP_CONT; } break; case IPPROTO_AH: /* AH post processing, put back fields we had to zero */ if (lsw_ip_hdr_version(irs) == 4) { lsw_ip4_hdr(irs)->ttl = irs->ttl; lsw_ip4_hdr(irs)->check = irs->check; lsw_ip4_hdr(irs)->frag_off = irs->frag_off; lsw_ip4_hdr(irs)->tos = irs->tos; } irs->state = IPSEC_RSM_AUTH_CHK; /* pull up the IP header again after processing */ skb_pull(irs->skb, ((unsigned char *)irs->protostuff.ahstuff.ahp) - ((unsigned char *)irs->iph)); break; case IPPROTO_COMP: crdc = crp->crp_desc; KLIPS_PRINT(debug_rcv, "comp before adjustments:\n"); KLIPS_IP_PRINT(debug_rcv & DB_TN_XMIT, irs->iph); orig_len = irs->skb->len - sizeof(struct ipcomphdr); decomp_len = crp->crp_olen; newiph = (struct iphdr*)((char*)irs->iph + sizeof(struct ipcomphdr)); KLIPS_PRINT(debug_rcv, "comp results: olen: %u, inject: %u (len=%d) iph->totlen=%u\n", crp->crp_olen, crdc->crd_inject, decomp_len, ntohs(newiph->tot_len)); /* * move the ip header to consume room previously taken by * the ipcomp header */ skb_pull(irs->skb, sizeof(struct ipcomphdr)); memmove(newiph, irs->iph, irs->iphlen); /* adjust the ipp pointer to point to the header we decoded */ irs->iph = newiph; skb_set_network_header(irs->skb, ipsec_skb_offset(irs->skb, ((unsigned char *) skb_network_header( irs -> skb)) + sizeof(struct ipcomphdr))); skb_set_transport_header(irs->skb, ipsec_skb_offset(irs->skb, ((unsigned char *) skb_transport_header( irs -> skb)) + sizeof( struct ipcomphdr))); if (lsw_ip_hdr_version(irs) == 6) { lsw_ip6_hdr(irs)->nexthdr = irs->next_header; } else { lsw_ip4_hdr(irs)->protocol = irs->next_header; lsw_ip4_hdr(irs)->tot_len = htons( irs->iphlen + decomp_len); lsw_ip4_hdr(irs)->check = 0; lsw_ip4_hdr(irs)->check = ip_fast_csum(irs->iph, lsw_ip4_hdr( irs)->ihl); } KLIPS_PRINT(debug_rcv, "comp after len adjustments:\n"); KLIPS_IP_PRINT(debug_rcv & DB_TN_XMIT, irs->iph); /* 
Update skb length/tail by "putting" the growth */ safe_skb_put(irs->skb, decomp_len - orig_len); /* set the new header in the skb */ skb_set_network_header(irs->skb, ipsec_skb_offset(irs->skb, irs->iph)); KLIPS_IP_PRINT(debug_rcv & DB_RX_PKTRX, ip_hdr(irs->skb)); /* release the backup copy */ if (irs->pre_ipcomp_skb) { kfree_skb(irs->pre_ipcomp_skb); irs->pre_ipcomp_skb = NULL; } /* IPcomp finished, continue processing */ irs->state = IPSEC_RSM_DECAP_CONT; break; } bail: crypto_freereq(crp); crp = NULL; ipsec_ocf_queue_task(ipsec_rsm, irs); return 0; }
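/*
 * Net effect of the length fix-up above, with illustrative numbers:
 * if the skb is 424 bytes long when the callback runs, orig_len is
 * 424 - 4 = 420; skb_pull() first drops the 4-byte ipcomp header and
 * safe_skb_put() then grows the buffer by decomp_len - orig_len, so
 * the skb ends up exactly decomp_len (i.e. crp->crp_olen) bytes long,
 * the size reported back by the decompression driver.
 */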
enum ipsec_rcv_value ipsec_ocf_rcv(struct ipsec_rcv_state *irs) { struct cryptop *crp; struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL; struct ipsec_sa *ipsp; int req_count = 0; int rc, err; KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv\n"); ipsp = irs->ipsp; if (!ipsp) { KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: " "no SA for rcv processing\n"); return IPSEC_RCV_SAIDNOTFOUND; } if (!irs->skb) { KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: no skb\n"); return IPSEC_RCV_SAIDNOTFOUND; } switch (ipsp->ips_said.proto) { case IPPROTO_COMP: rc = ipsec_ocf_ipcomp_copy_expand(irs); if (rc != IPSEC_RCV_OK) { KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: " "growing skb for ipcomp failed, rc=%d\n", rc); return rc; } break; case IPPROTO_ESP: case IPPROTO_AH: break; default: KLIPS_PRINT(debug_rcv & DB_RX_XF, "klips_debug:ipsec_ocf_rcv: " "bad protocol %d\n", ipsp->ips_said.proto); return IPSEC_RCV_BADPROTO; } req_count = (ipsp->ips_authalg ? 1 : 0) + (ipsp->ips_encalg ? 1 : 0); crp = crypto_getreq(req_count); if (!crp) { KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: " "crypto_getreq returned NULL\n"); return IPSEC_RCV_REALLYBAD; } /* we currently don't support any chaining across protocols */ switch (ipsp->ips_said.proto) { case IPPROTO_ESP: /* * we are decrypting, from the setup in ipsec_ocf_sa_init above, we * need to flip the order of hash/cipher for receive so that it is * hash first then decrypt. Transmit is ok. */ if (crp->crp_desc && crp->crp_desc->crd_next) { crda = crp->crp_desc; crde = crda->crd_next; } else { crde = crp->crp_desc; crda = crde->crd_next; } break; case IPPROTO_COMP: crdc = crp->crp_desc; break; case IPPROTO_AH: crda = crp->crp_desc; break; } if (crda) { /* Authentication descriptor */ crda->crd_alg = ipsec_ocf_authalg(ipsp->ips_authalg); if (!crda->crd_alg) { KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: " "bad auth alg 0x%x\n", ipsp->ips_authalg); crypto_freereq(crp); return IPSEC_RCV_BADPROTO; } if (!crde) { /* assuming AH processing */ /* push the IP header so we can authenticate it */ skb_push(irs->skb, ((unsigned char *)irs->protostuff.ahstuff.ahp) - ((unsigned char *)irs->iph)); } crda->crd_key = ipsp->ips_key_a; crda->crd_klen = ipsp->ips_key_bits_a; crda->crd_inject = irs->authenticator - irs->skb->data; /* OCF needs cri_mlen initialized in order to properly migrate the * session to another driver */ crda->crd_mlen = 12; /* Copy the authenticator to check against later */ memcpy(irs->hash, irs->authenticator, 12); if (!crde) { /* assume AH processing */ /* AH processing, save fields we have to zero */ if (lsw_ip_hdr_version(irs) == 4) { irs->ttl = lsw_ip4_hdr(irs)->ttl; irs->check = lsw_ip4_hdr(irs)->check; irs->frag_off = lsw_ip4_hdr(irs)->frag_off; irs->tos = lsw_ip4_hdr(irs)->tos; lsw_ip4_hdr(irs)->ttl = 0; lsw_ip4_hdr(irs)->check = 0; lsw_ip4_hdr(irs)->frag_off = 0; lsw_ip4_hdr(irs)->tos = 0; } crda->crd_len = irs->skb->len; crda->crd_skip = ((unsigned char *)irs->iph) - irs->skb->data; memset(irs->authenticator, 0, 12); } else { crda->crd_len = irs->ilen; crda->crd_skip = ((unsigned char *) irs->protostuff.espstuff.espp) - irs->skb->data; /* * It would be nice to clear the authenticator here * to be sure we do not see it again later when checking. * We cannot. Some HW actually expects to check the in-data * hash and flag an error if it is incorrect. * * What we do to allow this is to pass in the current in-data * value. 
Your OCF driver must ensure that it fails a request * for hash+decrypt with an invalid hash value, or returns the * computed in-data hash as requested. * * If your driver does not check the in-data hash but just * computes its value, you must ensure that it does not return * the original in-data hash by accident. It must invalidate the * in-data hash itself to force an auth check error. * * All existing drivers that do not care about the current * in-data hash do this by clearing the in-data hash before * processing, either directly or via their implementation. */ #if 0 memset(irs->authenticator, 0, 12); #endif } } if (crde) { crde->crd_alg = ipsec_ocf_encalg(ipsp->ips_encalg); if (!crde->crd_alg) { KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: " "bad enc alg 0x%x\n", ipsp->ips_encalg); crypto_freereq(crp); return IPSEC_RCV_BADPROTO; } irs->esphlen = ESP_HEADER_LEN + ipsp->ips_iv_size; irs->ilen -= irs->esphlen; crde->crd_skip = (skb_transport_header(irs->skb) - irs->skb->data) + irs->esphlen; crde->crd_len = irs->ilen; crde->crd_inject = crde->crd_skip - ipsp->ips_iv_size; crde->crd_klen = ipsp->ips_key_bits_e; crde->crd_key = ipsp->ips_key_e; } if (crdc) { struct ipcomphdr *cmph; int compalg = ipsp->ips_encalg; /* Decompression descriptor */ crdc->crd_alg = ipsec_ocf_compalg(compalg); if (!crdc->crd_alg) { KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: " "bad decomp alg 0x%x\n", ipsp->ips_encalg); crypto_freereq(crp); return IPSEC_RCV_BADPROTO; } crdc->crd_flags = 0; /* this is where the current ipcomp header is */ cmph = (struct ipcomphdr*)((char*)irs->iph + irs->iphlen); /* store the nested protocol */ irs->next_header = cmph->ipcomp_nh; /* start decompressing after ip header and the ipcomp header */ crdc->crd_skip = ((unsigned char*)irs->iph) + irs->iphlen + sizeof(struct ipcomphdr) - irs->skb->data; /* decompress all ip data past the ipcomp header */ if (lsw_ip_hdr_version(irs) == 6) { crdc->crd_len = (ntohs(lsw_ip6_hdr(irs)->payload_len) + sizeof(struct ipv6hdr)) - irs->iphlen - sizeof(struct ipcomphdr); } else { crdc->crd_len = ntohs(lsw_ip4_hdr(irs)->tot_len) - irs->iphlen - sizeof(struct ipcomphdr); } /* decompress inplace (some hardware can only do inplace) */ crdc->crd_inject = crdc->crd_skip; } crp->crp_ilen = irs->skb->len; /* Total input length */ crp->crp_olen = irs->skb->len; /* Total output length */ crp->crp_flags = CRYPTO_F_SKBUF | (ipsec_ocf_cbimm ? CRYPTO_F_CBIMM : 0) | (ipsec_ocf_batch ? CRYPTO_F_BATCH : 0) | 0; crp->crp_buf = (caddr_t) irs->skb; crp->crp_callback = ipsec_ocf_rcv_cb; crp->crp_sid = ipsp->ocf_cryptoid; crp->crp_opaque = (caddr_t) irs; rcv_migrate: if ((err = crypto_dispatch(crp))) { KLIPS_PRINT(debug_rcv, "crypto_dispatch rcv failure %u\n", err); crypto_freereq(crp); return IPSEC_RCV_REALLYBAD; } if (crp->crp_etype == EAGAIN) { /* Session has been migrated. Store the new session id and retry */ ipsp->ocf_cryptoid = crp->crp_sid; goto rcv_migrate; } return IPSEC_RCV_PENDING; }
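/*
 * ipsec_ocf_rcv() above assumes the session referenced by
 * ipsp->ocf_cryptoid was created with both an authentication and a
 * cipher transform chained together (the ipsec_ocf_sa_init() setup the
 * "flip the order" comment refers to).  The fragment below is only a
 * minimal sketch of how such a combined session is requested from
 * crypto_newsession(); the algorithm constants, ICV length and key
 * plumbing are placeholders, not the actual KLIPS code.
 */
static int
example_esp_session(uint64_t *sidp, caddr_t akey, int akeybits,
    caddr_t ekey, int ekeybits)
{
	struct cryptoini cria, crie;

	memset(&cria, 0, sizeof(cria));
	memset(&crie, 0, sizeof(crie));

	cria.cri_alg = CRYPTO_SHA1_HMAC;	/* authentication transform */
	cria.cri_klen = akeybits;		/* key length in bits */
	cria.cri_key = akey;
	cria.cri_mlen = 12;			/* truncated ICV length */

	crie.cri_alg = CRYPTO_AES_CBC;		/* cipher transform */
	crie.cri_klen = ekeybits;
	crie.cri_key = ekey;

	cria.cri_next = &crie;			/* chain the two transforms */

	/* 0: let the framework pick any capable driver. */
	return crypto_newsession(sidp, &cria, 0);
}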
static int ocf_cb(struct cryptop *crp) { request_t *r = (request_t *) crp->crp_opaque; crypto_freereq(crp); crp = NULL; total++; #ifdef DECC if(r->encrypt == 0) r->encrypt = 1; else r->encrypt = 0; if ((total > request_num) && r->encrypt) { #else if (total > request_num) { #endif outstanding--; return 0; } INIT_WORK(&r->work, ocf_request, r); schedule_work(&r->work); return 0; } static void ocf_request(void *arg) { request_t *r = arg; #ifdef BOTH struct cryptop *crp = crypto_getreq(2); #else int i; struct cryptop *crp = crypto_getreq(1); #endif struct cryptodesc *crde, *crda; int timeout = 0; if (!crp) { outstanding--; return; } #ifdef BOTH if(r->encrypt) { crde = crp->crp_desc; crda = crde->crd_next; } else { crda = crp->crp_desc; crde = crda->crd_next; } crda->crd_skip = 0; crda->crd_flags = 0; crda->crd_len = request_size; #ifdef DECC if( r->encrypt ) crda->crd_inject = request_size + 32; else crda->crd_inject = request_size + 64; #else crda->crd_inject = request_size + 32; #endif crda->crd_alg = CRYPTO_MD5_HMAC; crda->crd_key = "0123456789abcdefghij"; crda->crd_klen = 20 * 8; #else /* BOTH */ crde = crp->crp_desc; #endif /* BOTH */ crde->crd_skip = 0; #ifdef DECC if( r->encrypt ) crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT; else crde->crd_flags = CRD_F_IV_EXPLICIT; #else crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT; #endif crde->crd_len = request_size; memset(crde->crd_iv, 'a' ,EALG_MAX_BLOCK_LEN); crde->crd_inject = request_size; crde->crd_alg = CRYPTO_3DES_CBC; crde->crd_key = "0123456789abcdefghijklmn"; crde->crd_klen = 24 * 8; crp->crp_ilen = request_size + 128; crp->crp_flags = CRYPTO_F_CBIMM; crp->crp_buf = (caddr_t) r->buffer; crp->crp_callback = ocf_cb; crp->crp_sid = ocf_cryptoid; crp->crp_opaque = (caddr_t) r; while(crypto_dispatch(crp) != 0) { if(timeout > 1000) printk("Failed to insert request \n"); udelay(1); timeout++; } }
/* * ESP input processing, called (eventually) through the protocol switch. */ static int esp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) { char buf[128]; struct auth_hash *esph; struct enc_xform *espx; struct tdb_crypto *tc; uint8_t *ivp; int plen, alen, hlen; struct newesp *esp; struct cryptodesc *crde; struct cryptop *crp; IPSEC_ASSERT(sav != NULL, ("null SA")); IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("null encoding xform")); /* Valid IP Packet length ? */ if ( (skip&3) || (m->m_pkthdr.len&3) ){ DPRINTF(("%s: misaligned packet, skip %u pkt len %u", __func__, skip, m->m_pkthdr.len)); ESPSTAT_INC(esps_badilen); m_freem(m); return EINVAL; } /* XXX don't pullup, just copy header */ IP6_EXTHDR_GET(esp, struct newesp *, m, skip, sizeof (struct newesp)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Determine the ESP header and auth length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; alen = xform_ah_authsize(esph); /* * Verify payload length is multiple of encryption algorithm * block size. * * NB: This works for the null algorithm because the blocksize * is 4 and all packets must be 4-byte aligned regardless * of the algorithm. */ plen = m->m_pkthdr.len - (skip + hlen + alen); if ((plen & (espx->blocksize - 1)) || (plen <= 0)) { DPRINTF(("%s: payload of %d octets not a multiple of %d octets," " SA %s/%08lx\n", __func__, plen, espx->blocksize, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long)ntohl(sav->spi))); ESPSTAT_INC(esps_badilen); m_freem(m); return EINVAL; } /* * Check sequence number. */ if (esph != NULL && sav->replay != NULL && !ipsec_chkreplay(ntohl(esp->esp_seq), sav)) { DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_logsastr(sav, buf, sizeof(buf)))); /*XXX*/ ESPSTAT_INC(esps_replay); m_freem(m); return ENOBUFS; /*XXX*/ } /* Update the counters */ ESPSTAT_ADD(esps_ibytes, m->m_pkthdr.len - (skip + hlen + alen)); /* Get crypto descriptors */ crp = crypto_getreq(esph && espx ? 
2 : 1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); ESPSTAT_INC(esps_crypto); m_freem(m); return ENOBUFS; } /* Get IPsec-specific opaque pointer */ tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto) + alen, M_XDATA, M_NOWAIT | M_ZERO); if (tc == NULL) { crypto_freereq(crp); DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__)); ESPSTAT_INC(esps_crypto); m_freem(m); return ENOBUFS; } if (esph != NULL) { struct cryptodesc *crda = crp->crp_desc; IPSEC_ASSERT(crda != NULL, ("null ah crypto descriptor")); /* Authentication descriptor */ crda->crd_skip = skip; if (SAV_ISGCM(sav)) crda->crd_len = 8; /* RFC4106 5, SPI + SN */ else crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; crda->crd_alg = esph->type; /* Copy the authenticator */ m_copydata(m, m->m_pkthdr.len - alen, alen, (caddr_t) (tc + 1)); /* Chain authentication request */ crde = crda->crd_next; } else { crde = crp->crp_desc; } /* Crypto operation descriptor */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (caddr_t) m; crp->crp_callback = esp_input_cb; crp->crp_sid = sav->tdb_cryptoid; crp->crp_opaque = (caddr_t) tc; /* These are passed as-is to the callback */ tc->tc_spi = sav->spi; tc->tc_dst = sav->sah->saidx.dst; tc->tc_proto = sav->sah->saidx.proto; tc->tc_protoff = protoff; tc->tc_skip = skip; KEY_ADDREFSA(sav); tc->tc_sav = sav; /* Decryption descriptor */ IPSEC_ASSERT(crde != NULL, ("null esp crypto descriptor")); crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_inject = skip + hlen - sav->ivlen; if (SAV_ISCTRORGCM(sav)) { ivp = &crde->crd_iv[0]; /* GCM IV Format: RFC4106 4 */ /* CTR IV Format: RFC3686 4 */ /* Salt is last four bytes of key, RFC4106 8.1 */ /* Nonce is last four bytes of key, RFC3686 5.1 */ memcpy(ivp, sav->key_enc->key_data + _KEYLEN(sav->key_enc) - 4, 4); if (SAV_ISCTR(sav)) { /* Initial block counter is 1, RFC3686 4 */ be32enc(&ivp[sav->ivlen + 4], 1); } m_copydata(m, skip + hlen - sav->ivlen, sav->ivlen, &ivp[4]); crde->crd_flags |= CRD_F_IV_EXPLICIT; } crde->crd_alg = espx->type; return (crypto_dispatch(crp)); }
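/*
 * Layout of the explicit IV assembled above for AES-CTR and AES-GCM
 * SAs (see RFC 3686 section 4 and RFC 4106 section 4); with the usual
 * sav->ivlen of 8 the be32enc() call lands at offset 12:
 *
 *   crd_iv[0..3]    salt/nonce  - last four bytes of key_enc
 *   crd_iv[4..11]   IV          - eight bytes copied from the packet
 *   crd_iv[12..15]  counter     - 1, big-endian (CTR only; GCM manages
 *                                 its own block counter)
 */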
/* * ESP input processing, called (eventually) through the protocol switch. */ int esp_input(struct mbuf *m, struct tdb *tdb, int skip, int protoff) { struct auth_hash *esph = (struct auth_hash *) tdb->tdb_authalgxform; struct enc_xform *espx = (struct enc_xform *) tdb->tdb_encalgxform; struct tdb_crypto *tc; int plen, alen, hlen; struct m_tag *mtag; u_int32_t btsx; struct cryptodesc *crde = NULL, *crda = NULL; struct cryptop *crp; /* Determine the ESP header length */ if (tdb->tdb_flags & TDBF_NOREPLAY) hlen = sizeof(u_int32_t) + tdb->tdb_ivlen; /* "old" ESP */ else hlen = 2 * sizeof(u_int32_t) + tdb->tdb_ivlen; /* "new" ESP */ if (esph) alen = AH_HMAC_HASHLEN; else alen = 0; plen = m->m_pkthdr.len - (skip + hlen + alen); if (plen <= 0) { DPRINTF(("esp_input: invalid payload length\n")); espstat.esps_badilen++; m_freem(m); return EINVAL; } if (espx) { /* * Verify payload length is multiple of encryption algorithm * block size. */ if (plen & (espx->blocksize - 1)) { DPRINTF(("esp_input(): payload of %d octets not a multiple of %d octets, SA %s/%08x\n", plen, espx->blocksize, ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_badilen++; m_freem(m); return EINVAL; } } /* Replay window checking, if appropriate -- no value commitment. */ if ((tdb->tdb_wnd > 0) && (!(tdb->tdb_flags & TDBF_NOREPLAY))) { m_copydata(m, skip + sizeof(u_int32_t), sizeof(u_int32_t), (unsigned char *) &btsx); btsx = ntohl(btsx); switch (checkreplaywindow32(btsx, 0, &(tdb->tdb_rpl), tdb->tdb_wnd, &(tdb->tdb_bitmap), 0)) { case 0: /* All's well */ break; case 1: m_freem(m); DPRINTF(("esp_input(): replay counter wrapped for SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_wrap++; return EACCES; case 2: case 3: DPRINTF(("esp_input(): duplicate packet received in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); m_freem(m); return EACCES; default: m_freem(m); DPRINTF(("esp_input(): bogus value from checkreplaywindow32() in SA %s/%08x\n", ipsp_address(tdb->tdb_dst), ntohl(tdb->tdb_spi))); espstat.esps_replay++; return EACCES; } } /* Update the counters */ tdb->tdb_cur_bytes += m->m_pkthdr.len - skip - hlen - alen; espstat.esps_ibytes += m->m_pkthdr.len - skip - hlen - alen; /* Hard expiration */ if ((tdb->tdb_flags & TDBF_BYTES) && (tdb->tdb_cur_bytes >= tdb->tdb_exp_bytes)) { pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_HARD); tdb_delete(tdb); m_freem(m); return ENXIO; } /* Notify on soft expiration */ if ((tdb->tdb_flags & TDBF_SOFT_BYTES) && (tdb->tdb_cur_bytes >= tdb->tdb_soft_bytes)) { pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_SOFT); tdb->tdb_flags &= ~TDBF_SOFT_BYTES; /* Turn off checking */ } #ifdef notyet /* Find out if we've already done crypto */ for (mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, NULL); mtag != NULL; mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, mtag)) { struct tdb_ident *tdbi; tdbi = (struct tdb_ident *) (mtag + 1); if (tdbi->proto == tdb->tdb_sproto && tdbi->spi == tdb->tdb_spi && !bcmp(&tdbi->dst, &tdb->tdb_dst, sizeof(union sockaddr_union))) break; } #else mtag = NULL; #endif /* Get crypto descriptors */ crp = crypto_getreq(esph && espx ? 
2 : 1); if (crp == NULL) { m_freem(m); DPRINTF(("esp_input(): failed to acquire crypto descriptors\n")); espstat.esps_crypto++; return ENOBUFS; } /* Get IPsec-specific opaque pointer */ if (esph == NULL || mtag != NULL) tc = malloc(sizeof(*tc), M_XDATA, M_NOWAIT | M_ZERO); else tc = malloc(sizeof(*tc) + alen, M_XDATA, M_NOWAIT | M_ZERO); if (tc == NULL) { m_freem(m); crypto_freereq(crp); DPRINTF(("esp_input(): failed to allocate tdb_crypto\n")); espstat.esps_crypto++; return ENOBUFS; } tc->tc_ptr = (caddr_t) mtag; if (esph) { crda = crp->crp_desc; crde = crda->crd_next; /* Authentication descriptor */ crda->crd_skip = skip; crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; crda->crd_alg = esph->type; crda->crd_key = tdb->tdb_amxkey; crda->crd_klen = tdb->tdb_amxkeylen * 8; /* Copy the authenticator */ if (mtag == NULL) m_copydata(m, m->m_pkthdr.len - alen, alen, (caddr_t) (tc + 1)); } else crde = crp->crp_desc; /* Crypto operation descriptor */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_IMBUF; crp->crp_buf = (caddr_t) m; crp->crp_callback = (int (*) (struct cryptop *)) esp_input_cb; crp->crp_sid = tdb->tdb_cryptoid; crp->crp_opaque = (caddr_t) tc; /* These are passed as-is to the callback */ tc->tc_skip = skip; tc->tc_protoff = protoff; tc->tc_spi = tdb->tdb_spi; tc->tc_proto = tdb->tdb_sproto; bcopy(&tdb->tdb_dst, &tc->tc_dst, sizeof(union sockaddr_union)); /* Decryption descriptor */ if (espx) { crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_inject = skip + hlen - tdb->tdb_ivlen; if (tdb->tdb_flags & TDBF_HALFIV) { /* Copy half-IV from packet */ m_copydata(m, crde->crd_inject, tdb->tdb_ivlen, crde->crd_iv); /* Cook IV */ for (btsx = 0; btsx < tdb->tdb_ivlen; btsx++) crde->crd_iv[tdb->tdb_ivlen + btsx] = ~crde->crd_iv[btsx]; crde->crd_flags |= CRD_F_IV_EXPLICIT; } crde->crd_alg = espx->type; crde->crd_key = tdb->tdb_emxkey; crde->crd_klen = tdb->tdb_emxkeylen * 8; /* XXX Rounds ? */ } if (mtag == NULL) return crypto_dispatch(crp); else return esp_input_cb(crp); }
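/*
 * Half-IV example for the cooking loop above: with tdb_ivlen == 4 the
 * packet carries only the first half of the IV and the loop fills the
 * second half with its bitwise complement, so on-the-wire bytes
 * 01 02 03 04 become the working IV 01 02 03 04 fe fd fc fb.
 */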
/* * This is the main function responsible for cryptography (ie. communication * with crypto(9) subsystem). * * BIO_READ: * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> G_ELI_CRYPTO_RUN -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> G_ELI_CRYPTO_RUN -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ void g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp) { struct g_eli_softc *sc; struct cryptop *crp; struct cryptodesc *crd; u_int i, nsec, secsize; int err, error; off_t dstoff; size_t size; u_char *p, *data; G_ELI_LOGREQ(3, bp, "%s", __func__); bp->bio_pflags = wr->w_number; sc = wr->w_softc; secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize; nsec = bp->bio_length / secsize; /* * Calculate how much memory do we need. * We need separate crypto operation for every single sector. * It is much faster to calculate total amount of needed memory here and * do the allocation once instead of allocating memory in pieces (many, * many pieces). */ size = sizeof(*crp) * nsec; size += sizeof(*crd) * nsec; /* * If we write the data we cannot destroy current bio_data content, * so we need to allocate more memory for encrypted data. */ if (bp->bio_cmd == BIO_WRITE) size += bp->bio_length; p = malloc(size, M_ELI, M_WAITOK); bp->bio_inbed = 0; bp->bio_children = nsec; bp->bio_driver2 = p; if (bp->bio_cmd == BIO_READ) data = bp->bio_data; else { data = p; p += bp->bio_length; bcopy(bp->bio_data, data, bp->bio_length); } error = 0; for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) { crp = (struct cryptop *)p; p += sizeof(*crp); crd = (struct cryptodesc *)p; p += sizeof(*crd); crp->crp_sid = wr->w_sid; crp->crp_ilen = secsize; crp->crp_olen = secsize; crp->crp_opaque = (void *)bp; crp->crp_buf = (void *)data; data += secsize; if (bp->bio_cmd == BIO_WRITE) crp->crp_callback = g_eli_crypto_write_done; else /* if (bp->bio_cmd == BIO_READ) */ crp->crp_callback = g_eli_crypto_read_done; crp->crp_flags = CRYPTO_F_CBIFSYNC; if (g_eli_batch) crp->crp_flags |= CRYPTO_F_BATCH; crp->crp_desc = crd; crd->crd_skip = 0; crd->crd_len = secsize; crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0) crd->crd_flags |= CRD_F_KEY_EXPLICIT; if (bp->bio_cmd == BIO_WRITE) crd->crd_flags |= CRD_F_ENCRYPT; crd->crd_alg = sc->sc_ealgo; crd->crd_key = g_eli_key_hold(sc, dstoff, secsize); crd->crd_klen = sc->sc_ekeylen; if (sc->sc_ealgo == CRYPTO_AES_XTS) crd->crd_klen <<= 1; g_eli_crypto_ivgen(sc, dstoff, crd->crd_iv, sizeof(crd->crd_iv)); crd->crd_next = NULL; crp->crp_etype = 0; err = crypto_dispatch(crp); if (error == 0) error = err; } if (bp->bio_error == 0) bp->bio_error = error; }
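/*
 * The read/write completion callbacks named in the flow comment above
 * are not reproduced in this collection.  The sketch below shows the
 * general shape such a callback takes under crypto(9): one crp
 * completes per sector, and the parent bio is delivered once every
 * child has reported in.  It is illustrative only; the real
 * g_eli_crypto_read_done() additionally drops the per-sector key
 * reference taken with g_eli_key_hold() and logs errors.
 */
static int
example_crypto_read_done(struct cryptop *crp)
{
	struct bio *bp;

	if (crp->crp_etype == EAGAIN) {
		/* Session migrated; retry the request. */
		crp->crp_etype = 0;
		return (crypto_dispatch(crp));
	}
	bp = (struct bio *)crp->crp_opaque;
	if (crp->crp_etype == 0)
		bp->bio_completed += crp->crp_olen;
	else if (bp->bio_error == 0)
		bp->bio_error = crp->crp_etype;
	/* One child per sector; deliver the parent when the last one is in. */
	if (++bp->bio_inbed < bp->bio_children)
		return (0);
	free(bp->bio_driver2, M_ELI);	/* slab allocated in g_eli_crypto_run() */
	bp->bio_driver2 = NULL;
	if (bp->bio_error != 0)
		bp->bio_completed = 0;
	g_io_deliver(bp, bp->bio_error);
	return (0);
}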
static int g_eli_crypto_cipher(u_int algo, int enc, u_char *data, size_t datasize, const u_char *key, size_t keysize) { struct cryptoini cri; struct cryptop *crp; struct cryptodesc *crd; struct uio *uio; struct iovec *iov; uint64_t sid; u_char *p; int error; bzero(&cri, sizeof(cri)); cri.cri_alg = algo; cri.cri_key = __DECONST(void *, key); cri.cri_klen = keysize; error = crypto_newsession(&sid, &cri, CRYPTOCAP_F_SOFTWARE); if (error != 0) return (error); p = malloc(sizeof(*crp) + sizeof(*crd) + sizeof(*uio) + sizeof(*iov), M_ELI, M_NOWAIT | M_ZERO); if (p == NULL) { crypto_freesession(sid); return (ENOMEM); } crp = (struct cryptop *)p; p += sizeof(*crp); crd = (struct cryptodesc *)p; p += sizeof(*crd); uio = (struct uio *)p; p += sizeof(*uio); iov = (struct iovec *)p; p += sizeof(*iov); iov->iov_len = datasize; iov->iov_base = data; uio->uio_iov = iov; uio->uio_iovcnt = 1; uio->uio_segflg = UIO_SYSSPACE; uio->uio_resid = datasize; crd->crd_skip = 0; crd->crd_len = datasize; crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; if (enc) crd->crd_flags |= CRD_F_ENCRYPT; crd->crd_alg = algo; crd->crd_key = __DECONST(void *, key); crd->crd_klen = keysize; bzero(crd->crd_iv, sizeof(crd->crd_iv)); crd->crd_next = NULL; crp->crp_sid = sid; crp->crp_ilen = datasize; crp->crp_olen = datasize; crp->crp_opaque = NULL; crp->crp_callback = g_eli_crypto_done; crp->crp_buf = (void *)uio; crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC | CRYPTO_F_REL; crp->crp_desc = crd; error = crypto_dispatch(crp); if (error == 0) { while (crp->crp_opaque == NULL) tsleep(crp, PRIBIO, "geli", hz / 5); error = crp->crp_etype; } free(crp, M_ELI); crypto_freesession(sid); return (error); }
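/*
 * Typical use of the helper above (a sketch; in GELI proper it is
 * wrapped by thin encrypt/decrypt helpers): run a metadata blob
 * through a one-shot software AES-CBC session.  Note that the key
 * length is passed in bits, matching cri_klen/crd_klen above.
 */
static int
example_encrypt_blob(u_char *blob, size_t len, const u_char *key,
    size_t keybits)
{

	/* enc != 0 selects CRD_F_ENCRYPT in the descriptor built above. */
	return (g_eli_crypto_cipher(CRYPTO_AES_CBC, 1, blob, len, key,
	    keybits));
}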
/* * ESP output callback, called directly by the crypto driver. */ int esp_output_cb(void *op) { struct cryptop *crp = (struct cryptop *) op; struct tdb_crypto *tc; struct tdb *tdb; struct mbuf *m; int error, s; tc = (struct tdb_crypto *) crp->crp_opaque; m = (struct mbuf *) crp->crp_buf; if (m == NULL) { /* Shouldn't happen... */ free(tc, M_XDATA); crypto_freereq(crp); espstat.esps_crypto++; DPRINTF(("esp_output_cb(): bogus returned buffer from " "crypto\n")); return (EINVAL); } s = spltdb(); tdb = gettdb(tc->tc_spi, &tc->tc_dst, tc->tc_proto); if (tdb == NULL) { free(tc, M_XDATA); espstat.esps_notdb++; DPRINTF(("esp_output_cb(): TDB is expired while in crypto\n")); error = EPERM; goto baddone; } /* Check for crypto errors. */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ if (tdb->tdb_cryptoid != 0) tdb->tdb_cryptoid = crp->crp_sid; splx(s); return crypto_dispatch(crp); } free(tc, M_XDATA); espstat.esps_noxform++; DPRINTF(("esp_output_cb(): crypto error %d\n", crp->crp_etype)); error = crp->crp_etype; goto baddone; } free(tc, M_XDATA); /* Release crypto descriptors. */ crypto_freereq(crp); /* * If we're doing half-iv, keep a copy of the last few bytes of the * encrypted part, for use as the next IV. Note that HALF-IV is only * supposed to be used without authentication (the old ESP specs). */ if (tdb->tdb_flags & TDBF_HALFIV) m_copydata(m, m->m_pkthdr.len - tdb->tdb_ivlen, tdb->tdb_ivlen, tdb->tdb_iv); /* Call the IPsec input callback. */ error = ipsp_process_done(m, tdb); splx(s); return error; baddone: splx(s); if (m != NULL) m_freem(m); crypto_freereq(crp); return error; }
/* * ESP input callback from the crypto driver. */ static int esp_input_cb(struct cryptop *crp) { IPSEC_DEBUG_DECLARE(char buf[128]); u_int8_t lastthree[3], aalg[AH_HMAC_MAXHASHLEN]; const struct auth_hash *esph; const struct enc_xform *espx; struct mbuf *m; struct cryptodesc *crd; struct xform_data *xd; struct secasvar *sav; struct secasindex *saidx; caddr_t ptr; uint64_t cryptoid; int hlen, skip, protoff, error, alen; crd = crp->crp_desc; IPSEC_ASSERT(crd != NULL, ("null crypto descriptor!")); m = (struct mbuf *) crp->crp_buf; xd = (struct xform_data *) crp->crp_opaque; sav = xd->sav; skip = xd->skip; protoff = xd->protoff; cryptoid = xd->cryptoid; saidx = &sav->sah->saidx; esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Check for crypto errors */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ if (ipsec_updateid(sav, &crp->crp_sid, &cryptoid) != 0) crypto_freesession(cryptoid); xd->cryptoid = crp->crp_sid; return (crypto_dispatch(crp)); } ESPSTAT_INC(esps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { ESPSTAT_INC(esps_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } ESPSTAT_INC(esps_hist[sav->alg_enc]); /* If authentication was performed, check now. */ if (esph != NULL) { alen = xform_ah_authsize(esph); AHSTAT_INC(ahs_hist[sav->alg_auth]); /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - alen, alen, aalg); ptr = (caddr_t) (xd + 1); /* Verify authenticator */ if (timingsafe_bcmp(ptr, aalg, alen) != 0) { DPRINTF(("%s: authentication hash mismatch for " "packet in SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_badauth); error = EACCES; goto bad; } m->m_flags |= M_AUTHIPDGM; /* Remove trailing authenticator */ m_adj(m, -alen); } /* Release the crypto descriptors */ free(xd, M_XDATA), xd = NULL; crypto_freereq(crp), crp = NULL; /* * Packet is now decrypted. */ m->m_flags |= M_DECRYPTED; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newesp, esp_seq), sizeof (seq), (caddr_t) &seq); SECASVAR_LOCK(sav); if (ipsec_updatereplay(ntohl(seq), sav)) { SECASVAR_UNLOCK(sav); DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_sa2str(sav, buf, sizeof(buf)))); ESPSTAT_INC(esps_replay); error = EACCES; goto bad; } SECASVAR_UNLOCK(sav); }