/* SSL_AEAD_CTX_max_overhead returns the maximum number of bytes sealing may
 * add to a record: the AEAD's own overhead plus any explicit nonce bytes
 * written into the record. A NULL |aead| (the NULL cipher) adds nothing. */
size_t SSL_AEAD_CTX_max_overhead(SSL_AEAD_CTX *aead) {
  if (aead == NULL) {
    return 0;
  }
  size_t overhead = EVP_AEAD_max_overhead(aead->ctx.aead);
  overhead += SSL_AEAD_CTX_explicit_nonce_len(aead);
  return overhead;
}
/* aead_aes_ccm_init initialises |ctx| for AES-CCM with tag length |M| and a
 * nonce length of 15 - |L| (per the asserts, these must match the values the
 * EVP_AEAD advertises). Returns one on success and zero on error. */
static int aead_aes_ccm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                             size_t key_len, size_t tag_len, unsigned M,
                             unsigned L) {
  assert(M == EVP_AEAD_max_overhead(ctx->aead));
  assert(M == EVP_AEAD_max_tag_len(ctx->aead));
  assert(15 - L == EVP_AEAD_nonce_length(ctx->aead));

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  /* CCM only supports the one tag length |M|; the default resolves to it. */
  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = M;
  } else if (tag_len != M) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  struct aead_aes_ccm_ctx *state = (struct aead_aes_ccm_ctx *)&ctx->state;

  block128_f block_fn;
  ctr128_f ctr_fn = aes_ctr_set_key(&state->ks.ks, NULL, &block_fn, key, key_len);
  ctx->tag_len = tag_len;
  if (!CRYPTO_ccm128_init(&state->ccm, &state->ks.ks, block_fn, ctr_fn, M, L)) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  return 1;
}
/* SSL_AEAD_CTX_max_overhead returns the maximum number of bytes sealing may
 * add to a record. In fuzzer mode records are treated as if there were no
 * AEAD, so the overhead is reported as zero. */
size_t SSL_AEAD_CTX_max_overhead(SSL_AEAD_CTX *aead) {
#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
  /* Force the NULL-cipher path below. */
  aead = NULL;
#endif
  if (aead == NULL) {
    return 0;
  }
  const size_t explicit_nonce = SSL_AEAD_CTX_explicit_nonce_len(aead);
  return EVP_AEAD_max_overhead(aead->ctx.aead) + explicit_nonce;
}
/* aead_tls_seal implements EVP_AEAD sealing for legacy TLS cipher suites using
 * MAC-then-encrypt: it HMACs |ad| + length + |in|, encrypts the plaintext
 * followed by the MAC, and, for CBC ciphers, appends TLS-style padding
 * (padding_len copies of padding_len - 1). Returns one on success, zero on
 * error. |*out_len| is set to the sealed length on success.
 * NOTE(review): |ad| is expected to be the 13-byte TLS additional data minus
 * the trailing 2 length bytes — confirm against callers. */
static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len, const uint8_t *ad,
                         size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  /* The first clause rejects size_t overflow of in_len + overhead. */
  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len || in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. The plaintext length is appended here as a
   * big-endian 16-bit value. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  HMAC_CTX hmac_ctx;
  HMAC_CTX_init(&hmac_ctx);
  /* Copy the pre-keyed HMAC state rather than re-keying for each record. */
  if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
      !HMAC_Update(&hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&hmac_ctx, in, in_len) ||
      !HMAC_Final(&hmac_ctx, mac, &mac_len)) {
    HMAC_CTX_cleanup(&hmac_ctx);
    return 0;
  }
  HMAC_CTX_cleanup(&hmac_ctx);

  /* Configure the explicit IV. Only done for explicit-IV CBC suites; with an
   * implicit IV the cipher retains its state between records. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. TLS padding is
     * padding_len bytes, each holding padding_len - 1; always non-empty. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}
/* aead_ssl3_seal implements EVP_AEAD sealing for SSLv3 cipher suites using
 * MAC-then-encrypt: it MACs |ad| + |in| via |ssl3_mac|, encrypts the
 * plaintext followed by the MAC, and, for CBC ciphers, appends SSLv3-style
 * padding (zero bytes with a final length byte). Returns one on success, zero
 * on error; |*out_len| receives the sealed length.
 * NOTE(review): |ad| is expected to be the 11-byte SSL3 additional data minus
 * the trailing 2 length bytes — confirm against callers. */
static int aead_ssl3_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                          size_t *out_len, size_t max_out_len,
                          const uint8_t *nonce, size_t nonce_len,
                          const uint8_t *in, size_t in_len, const uint8_t *ad,
                          size_t ad_len) {
  AEAD_SSL3_CTX *ssl3_ctx = (AEAD_SSL3_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!ssl3_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  /* The first clause rejects size_t overflow of in_len + overhead. */
  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len || in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  /* SSL3 ciphers carry no per-record nonce. */
  if (nonce_len != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_IV_TOO_LARGE);
    return 0;
  }

  if (ad_len != 11 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  if (!ssl3_mac(ssl3_ctx, mac, &mac_len, ad, ad_len, in, in_len)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

  unsigned block_size = EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&ssl3_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. Unlike TLS, SSL3 padding
     * is zero-filled with only the final byte holding padding_len - 1. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    memset(padding, 0, padding_len - 1);
    padding[padding_len - 1] = padding_len - 1;
    if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&ssl3_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}
/* tls1_change_cipher_state_aead installs the new AEAD read or write context on
 * |s| after a ChangeCipherSpec. For "stateful" AEADs that emulate pre-AEAD
 * cipher suites (mac_secret_len > 0), the MAC secret, cipher key, and IV are
 * concatenated into a single AEAD key. Returns one on success, zero on
 * error. */
static int tls1_change_cipher_state_aead(SSL *s, char is_read,
                                         const uint8_t *key, unsigned key_len,
                                         const uint8_t *iv, unsigned iv_len,
                                         const uint8_t *mac_secret,
                                         unsigned mac_secret_len) {
  const EVP_AEAD *aead = s->s3->tmp.new_aead;
  SSL_AEAD_CTX *aead_ctx;
  /* merged_key is used to merge the MAC, cipher, and IV keys for an AEAD which
   * simulates pre-AEAD cipher suites. */
  uint8_t merged_key[EVP_AEAD_MAX_KEY_LENGTH];

  if (mac_secret_len > 0) {
    /* This is a "stateful" AEAD (for compatibility with pre-AEAD cipher
     * suites). Layout: mac_secret || key || iv. */
    if (mac_secret_len + key_len + iv_len > sizeof(merged_key)) {
      OPENSSL_PUT_ERROR(SSL, tls1_change_cipher_state_aead,
                        ERR_R_INTERNAL_ERROR);
      return 0;
    }
    memcpy(merged_key, mac_secret, mac_secret_len);
    memcpy(merged_key + mac_secret_len, key, key_len);
    memcpy(merged_key + mac_secret_len + key_len, iv, iv_len);
    /* Point |key|/|key_len| at the merged buffer for the init call below. */
    key = merged_key;
    key_len += mac_secret_len;
    key_len += iv_len;
  }

  if (is_read) {
    if (!tls1_aead_ctx_init(&s->aead_read_ctx)) {
      return 0;
    }
    aead_ctx = s->aead_read_ctx;
  } else {
    if (SSL_IS_DTLS(s) && s->aead_write_ctx != NULL) {
      /* DTLS renegotiation is unsupported, so a CCS can only switch away from
       * the NULL cipher. This simplifies renegotiation. */
      OPENSSL_PUT_ERROR(SSL, tls1_change_cipher_state_aead,
                        ERR_R_INTERNAL_ERROR);
      return 0;
    }
    if (!tls1_aead_ctx_init(&s->aead_write_ctx)) {
      return 0;
    }
    aead_ctx = s->aead_write_ctx;
  }

  if (!EVP_AEAD_CTX_init_with_direction(
          &aead_ctx->ctx, aead, key, key_len, EVP_AEAD_DEFAULT_TAG_LENGTH,
      is_read ? evp_aead_open : evp_aead_seal)) {
    /* On failure, release the just-allocated context and clear the pointer so
     * the SSL object is not left holding a freed context. */
    OPENSSL_free(aead_ctx);
    if (is_read) {
      s->aead_read_ctx = NULL;
    } else {
      s->aead_write_ctx = NULL;
    }
    return 0;
  }

  if (mac_secret_len == 0) {
    /* For a real AEAD, the IV is the fixed part of the nonce. */
    if (iv_len > sizeof(aead_ctx->fixed_nonce)) {
      OPENSSL_PUT_ERROR(SSL, tls1_change_cipher_state_aead,
                        ERR_R_INTERNAL_ERROR);
      return 0;
    }
    memcpy(aead_ctx->fixed_nonce, iv, iv_len);
    aead_ctx->fixed_nonce_len = iv_len;
    aead_ctx->variable_nonce_included_in_record =
        (s->s3->tmp.new_cipher->algorithm2 &
         SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD) != 0;
    aead_ctx->random_variable_nonce = 0;
    aead_ctx->omit_length_in_ad = 0;
  } else {
    /* Stateful AEADs (CBC/stream emulation) carry their explicit nonce in the
     * record and omit the length from the additional data. */
    aead_ctx->fixed_nonce_len = 0;
    aead_ctx->variable_nonce_included_in_record = 1;
    aead_ctx->random_variable_nonce = 1;
    aead_ctx->omit_length_in_ad = 1;
  }
  aead_ctx->variable_nonce_len = s->s3->tmp.new_variable_iv_len;
  /* SSLv3 additional data excludes the protocol version. */
  aead_ctx->omit_version_in_ad = (s->version == SSL3_VERSION);
  /* The fixed and variable nonce parts must exactly cover the AEAD nonce. */
  if (aead_ctx->variable_nonce_len + aead_ctx->fixed_nonce_len !=
      EVP_AEAD_nonce_length(aead)) {
    OPENSSL_PUT_ERROR(SSL, tls1_change_cipher_state_aead,
                      ERR_R_INTERNAL_ERROR);
    return 0;
  }
  aead_ctx->tag_len = EVP_AEAD_max_overhead(aead);
  return 1;
}