// Seals (encrypts and authenticates) |in_len| bytes from |in| with AES-CCM in
// the "scatter" calling convention: ciphertext is written to |out| and the tag
// to |out_tag|, with the tag length reported via |out_tag_len|. Returns 1 on
// success and 0 (with an error queued) on failure.
//
// NOTE(review): |extra_in|/|extra_in_len| are accepted but ignored here —
// presumably the EVP_AEAD framework rejects extra input for this AEAD before
// this function is reached; confirm against the AEAD's method table.
static int aead_aes_ccm_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len,
    const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad,
    size_t ad_len) {
  const struct aead_aes_ccm_ctx *ccm_ctx = ctx->aead_state;
  // CCM encodes the plaintext length in a fixed-width field, so inputs beyond
  // the mode's limit must be rejected up front.
  if (in_len > CRYPTO_ccm128_max_input(&ccm_ctx->ccm)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  // The caller must provide room for the full tag.
  if (max_out_tag_len < ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (!CRYPTO_ccm128_encrypt(&ccm_ctx->ccm, &ccm_ctx->ks.ks, out, out_tag,
                             ctx->tag_len, nonce, nonce_len, in, in_len, ad,
                             ad_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  *out_tag_len = ctx->tag_len;
  return 1;
}
// Initializes an AES-CCM AEAD context. |M| is the CCM tag length in bytes and
// |L| is the CCM length-field width; 15 - L gives the nonce length. Returns 1
// on success and 0 (with an error queued) on failure.
static int aead_aes_ccm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                             size_t key_len, size_t tag_len, unsigned M,
                             unsigned L) {
  // The (M, L) parameters must agree with the static EVP_AEAD description.
  assert(M == EVP_AEAD_max_overhead(ctx->aead));
  assert(M == EVP_AEAD_max_tag_len(ctx->aead));
  assert(15 - L == EVP_AEAD_nonce_length(ctx->aead));

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = M;
  }

  // CCM only supports the single tag length baked into (M, L).
  if (tag_len != M) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  struct aead_aes_ccm_ctx *ccm_ctx = (struct aead_aes_ccm_ctx *)&ctx->state;

  // |aes_ctr_set_key| selects the best AES implementation and returns an
  // optional CTR function alongside the block function.
  block128_f block;
  ctr128_f ctr = aes_ctr_set_key(&ccm_ctx->ks.ks, NULL, &block, key, key_len);
  ctx->tag_len = tag_len;
  if (!CRYPTO_ccm128_init(&ccm_ctx->ccm, &ccm_ctx->ks.ks, block, ctr, M, L)) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  return 1;
}
// Opens (decrypts and authenticates) an AES-CCM record in the "gather"
// calling convention: ciphertext |in| and tag |in_tag| are supplied
// separately. Returns 1 on success and 0 (with an error queued) on failure.
// The tag comparison uses |CRYPTO_memcmp| so it is constant-time.
static int aead_aes_ccm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                    const uint8_t *nonce, size_t nonce_len,
                                    const uint8_t *in, size_t in_len,
                                    const uint8_t *in_tag, size_t in_tag_len,
                                    const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_ccm_ctx *ccm_ctx =
      (struct aead_aes_ccm_ctx *)&ctx->state;
  // CCM encodes the message length in a fixed-width field; reject oversized
  // inputs up front.
  if (in_len > CRYPTO_ccm128_max_input(&ccm_ctx->ccm)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  // Only the exact configured tag length is accepted.
  if (in_tag_len != ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  // Decrypt and recompute the expected tag, then compare against the
  // received tag in constant time.
  uint8_t tag[EVP_AEAD_AES_CCM_MAX_TAG_LEN];
  assert(ctx->tag_len <= EVP_AEAD_AES_CCM_MAX_TAG_LEN);
  if (!CRYPTO_ccm128_decrypt(&ccm_ctx->ccm, &ccm_ctx->ks.ks, out, tag,
                             ctx->tag_len, nonce, nonce_len, in, in_len, ad,
                             ad_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (CRYPTO_memcmp(tag, in_tag, ctx->tag_len) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  return 1;
}
SSL_AEAD_CTX *SSL_AEAD_CTX_new(enum evp_aead_direction_t direction, uint16_t version, const SSL_CIPHER *cipher, const uint8_t *enc_key, size_t enc_key_len, const uint8_t *mac_key, size_t mac_key_len, const uint8_t *fixed_iv, size_t fixed_iv_len) { const EVP_AEAD *aead; size_t discard; if (!ssl_cipher_get_evp_aead(&aead, &discard, &discard, cipher, version)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return 0; } uint8_t merged_key[EVP_AEAD_MAX_KEY_LENGTH]; if (mac_key_len > 0) { /* This is a "stateful" AEAD (for compatibility with pre-AEAD cipher * suites). */ if (mac_key_len + enc_key_len + fixed_iv_len > sizeof(merged_key)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return 0; } memcpy(merged_key, mac_key, mac_key_len); memcpy(merged_key + mac_key_len, enc_key, enc_key_len); memcpy(merged_key + mac_key_len + enc_key_len, fixed_iv, fixed_iv_len); enc_key = merged_key; enc_key_len += mac_key_len; enc_key_len += fixed_iv_len; } SSL_AEAD_CTX *aead_ctx = OPENSSL_malloc(sizeof(SSL_AEAD_CTX)); if (aead_ctx == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); return NULL; } memset(aead_ctx, 0, sizeof(SSL_AEAD_CTX)); aead_ctx->cipher = cipher; if (!EVP_AEAD_CTX_init_with_direction( &aead_ctx->ctx, aead, enc_key, enc_key_len, EVP_AEAD_DEFAULT_TAG_LENGTH, direction)) { OPENSSL_free(aead_ctx); return NULL; } assert(EVP_AEAD_nonce_length(aead) <= EVP_AEAD_MAX_NONCE_LENGTH); aead_ctx->variable_nonce_len = (uint8_t)EVP_AEAD_nonce_length(aead); if (mac_key_len == 0) { assert(fixed_iv_len <= sizeof(aead_ctx->fixed_nonce)); memcpy(aead_ctx->fixed_nonce, fixed_iv, fixed_iv_len); aead_ctx->fixed_nonce_len = fixed_iv_len; if (cipher->algorithm_enc & SSL_CHACHA20POLY1305) { /* The fixed nonce into the actual nonce (the sequence number). */ aead_ctx->xor_fixed_nonce = 1; aead_ctx->variable_nonce_len = 8; } else { /* The fixed IV is prepended to the nonce. 
*/ assert(fixed_iv_len <= aead_ctx->variable_nonce_len); aead_ctx->variable_nonce_len -= fixed_iv_len; } /* AES-GCM uses an explicit nonce. */ if (cipher->algorithm_enc & (SSL_AES128GCM | SSL_AES256GCM)) { aead_ctx->variable_nonce_included_in_record = 1; } /* The TLS 1.3 construction XORs the fixed nonce into the sequence number * and omits the additional data. */ if (version >= TLS1_3_VERSION) { aead_ctx->xor_fixed_nonce = 1; aead_ctx->variable_nonce_len = 8; aead_ctx->variable_nonce_included_in_record = 0; aead_ctx->omit_ad = 1; } } else { aead_ctx->variable_nonce_included_in_record = 1; aead_ctx->random_variable_nonce = 1; aead_ctx->omit_length_in_ad = 1; aead_ctx->omit_version_in_ad = (version == SSL3_VERSION); } return aead_ctx; }
/* aead_tls_open decrypts and authenticates a legacy MAC-then-encrypt TLS
 * record. On success it writes the plaintext (MAC and padding stripped) to
 * |out| and its length to |*out_len|, returning 1. Returns 0 with an error
 * queued on failure.
 *
 * The code between the padding removal and the final |good| check is
 * timing-sensitive: it must not branch on, or leak, the padding validity or
 * the MAC location, to resist Lucky-13-style attacks. */
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len, const uint8_t *ad,
                         size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  /* The record must be at least large enough to hold a MAC. */
  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    /* This requires that the caller provide space for the MAC, even though it
     * will always be removed on return. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  /* The AD is the 13-byte TLS header minus the 2-byte length, which is
   * appended below once the plaintext length is known. */
  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_TOO_LARGE);
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Decrypt to get the plaintext + MAC + padding. */
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);

  /* Remove CBC padding. Code from here on is timing-sensitive with respect to
   * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */
  int padding_ok;
  unsigned data_plus_mac_len, data_len;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    padding_ok = EVP_tls_cbc_remove_padding(
        &data_plus_mac_len, out, total,
        EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
        (unsigned)HMAC_size(&tls_ctx->hmac_ctx));
    /* Publicly invalid. This can be rejected in non-constant time. */
    if (padding_ok == 0) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = 1;
    data_plus_mac_len = total;
    /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
     * already been checked against the MAC size at the top of the function. */
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  /* At this point, |padding_ok| is 1 or -1. If 1, the padding is valid and the
   * first |data_plus_mac_size| bytes after |out| are the plaintext and
   * MAC. Either way, |data_plus_mac_size| is large enough to extract a MAC. */

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. */
  uint8_t ad_fixed[13];
  memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  /* Compute the MAC and extract the one in the record. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    /* Constant-time CBC path: digest the record and copy out the claimed MAC
     * without leaking its (secret) offset. */
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    /* We should support the constant-time path for all CBC-mode ciphers
     * implemented. */
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    HMAC_CTX hmac_ctx;
    HMAC_CTX_init(&hmac_ctx);
    unsigned mac_len_u;
    if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
        !HMAC_Update(&hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&hmac_ctx, out, data_len) ||
        !HMAC_Final(&hmac_ctx, mac, &mac_len_u)) {
      HMAC_CTX_cleanup(&hmac_ctx);
      return 0;
    }
    mac_len = mac_len_u;
    HMAC_CTX_cleanup(&hmac_ctx);
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  /* Perform the MAC check and the padding check in constant-time. It should be
   * safe to simply perform the padding check first, but it would not be under a
   * different choice of MAC location on padding failure. See
   * EVP_tls_cbc_remove_padding. */
  unsigned good =
      constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
  good &= constant_time_eq_int(padding_ok, 1);
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  /* End of timing-sensitive code. */

  *out_len = data_len;
  return 1;
}
/* aead_tls_seal encrypts a legacy MAC-then-encrypt TLS record: it computes
 * the HMAC over the AD and plaintext, then encrypts plaintext || MAC ||
 * padding into |out|. Returns 1 on success, 0 on failure.
 *
 * The MAC is computed before any encryption so the operation may be done
 * in-place (|in| aliasing |out|). */
static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len, const uint8_t *ad,
                         size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  /* The first clause also rejects |in_len| values whose overhead-inclusive
   * size would wrap around. */
  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len ||
      in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  /* The AD is the 13-byte TLS header minus the 2-byte length. */
  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  HMAC_CTX hmac_ctx;
  HMAC_CTX_init(&hmac_ctx);
  if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
      !HMAC_Update(&hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&hmac_ctx, in, in_len) ||
      !HMAC_Final(&hmac_ctx, mac, &mac_len)) {
    HMAC_CTX_cleanup(&hmac_ctx);
    return 0;
  }
  HMAC_CTX_cleanup(&hmac_ctx);

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. TLS CBC padding is
     * |padding_len| bytes, each of value |padding_len - 1|. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}
/* tls1_setup_key_block derives and caches the key block for the pending
 * cipher state of |s|: 2 * (key || MAC secret || fixed IV). Returns 1 on
 * success (or if the key block already exists) and 0 on error. */
int tls1_setup_key_block(SSL *s) {
  uint8_t *p;
  const EVP_AEAD *aead = NULL;
  int ret = 0;
  size_t mac_secret_len, fixed_iv_len, variable_iv_len, key_len;
  size_t key_block_len;

  /* Already derived for this handshake; nothing to do. */
  if (s->s3->tmp.key_block_length != 0) {
    return 1;
  }

  if (s->session->cipher == NULL) {
    goto cipher_unavailable_err;
  }

  if (!ssl_cipher_get_evp_aead(&aead, &mac_secret_len, &fixed_iv_len,
                               s->session->cipher,
                               ssl3_version_from_wire(s, s->version))) {
    goto cipher_unavailable_err;
  }

  key_len = EVP_AEAD_key_length(aead);
  variable_iv_len = EVP_AEAD_nonce_length(aead);
  if (mac_secret_len > 0) {
    /* For "stateful" AEADs (i.e. compatibility with pre-AEAD cipher suites) the
     * key length reported by |EVP_AEAD_key_length| will include the MAC key
     * bytes and initial implicit IV. */
    if (key_len < mac_secret_len + fixed_iv_len) {
      OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
      return 0;
    }
    key_len -= mac_secret_len + fixed_iv_len;
  } else {
    /* The nonce is split into a fixed portion and a variable portion. */
    if (variable_iv_len < fixed_iv_len) {
      OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
      return 0;
    }
    variable_iv_len -= fixed_iv_len;
  }

  /* The lengths are stored in uint8_t fields below. */
  assert(mac_secret_len < 256);
  assert(fixed_iv_len < 256);
  assert(variable_iv_len < 256);

  s->s3->tmp.new_aead = aead;
  s->s3->tmp.new_mac_secret_len = (uint8_t)mac_secret_len;
  s->s3->tmp.new_fixed_iv_len = (uint8_t)fixed_iv_len;
  s->s3->tmp.new_variable_iv_len = (uint8_t)variable_iv_len;

  /* Two copies of everything: one for each direction. */
  key_block_len = key_len + mac_secret_len + fixed_iv_len;
  key_block_len *= 2;

  ssl3_cleanup_key_block(s);

  p = (uint8_t *)OPENSSL_malloc(key_block_len);
  if (p == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE);
    goto err;
  }

  s->s3->tmp.key_block_length = key_block_len;
  s->s3->tmp.key_block = p;

  if (!tls1_generate_key_block(s, p, key_block_len)) {
    goto err;
  }

  ret = 1;

err:
  return ret;

cipher_unavailable_err:
  OPENSSL_PUT_ERROR(SSL, SSL_R_CIPHER_OR_HASH_UNAVAILABLE);
  return 0;
}
SSL_AEAD_CTX *SSL_AEAD_CTX_new(enum evp_aead_direction_t direction, uint16_t version, const SSL_CIPHER *cipher, const uint8_t *enc_key, size_t enc_key_len, const uint8_t *mac_key, size_t mac_key_len, const uint8_t *fixed_iv, size_t fixed_iv_len) { const EVP_AEAD *aead; size_t discard; if (!ssl_cipher_get_evp_aead(&aead, &discard, &discard, cipher, version)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return 0; } uint8_t merged_key[EVP_AEAD_MAX_KEY_LENGTH]; if (mac_key_len > 0) { /* This is a "stateful" AEAD (for compatibility with pre-AEAD cipher * suites). */ if (mac_key_len + enc_key_len + fixed_iv_len > sizeof(merged_key)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return 0; } memcpy(merged_key, mac_key, mac_key_len); memcpy(merged_key + mac_key_len, enc_key, enc_key_len); memcpy(merged_key + mac_key_len + enc_key_len, fixed_iv, fixed_iv_len); enc_key = merged_key; enc_key_len += mac_key_len; enc_key_len += fixed_iv_len; } SSL_AEAD_CTX *aead_ctx = (SSL_AEAD_CTX *)OPENSSL_malloc(sizeof(SSL_AEAD_CTX)); if (aead_ctx == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); return NULL; } memset(aead_ctx, 0, sizeof(SSL_AEAD_CTX)); aead_ctx->cipher = cipher; if (!EVP_AEAD_CTX_init_with_direction( &aead_ctx->ctx, aead, enc_key, enc_key_len, EVP_AEAD_DEFAULT_TAG_LENGTH, direction)) { OPENSSL_free(aead_ctx); return NULL; } assert(EVP_AEAD_nonce_length(aead) <= EVP_AEAD_MAX_NONCE_LENGTH); aead_ctx->variable_nonce_len = (uint8_t)EVP_AEAD_nonce_length(aead); if (mac_key_len == 0) { /* For a real AEAD, the IV is the fixed part of the nonce. */ if (fixed_iv_len > sizeof(aead_ctx->fixed_nonce) || fixed_iv_len > aead_ctx->variable_nonce_len) { SSL_AEAD_CTX_free(aead_ctx); OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return 0; } aead_ctx->variable_nonce_len -= fixed_iv_len; memcpy(aead_ctx->fixed_nonce, fixed_iv, fixed_iv_len); aead_ctx->fixed_nonce_len = fixed_iv_len; /* AES-GCM uses an explicit nonce. 
*/ if (cipher->algorithm_enc & (SSL_AES128GCM | SSL_AES256GCM)) { aead_ctx->variable_nonce_included_in_record = 1; } } else { aead_ctx->variable_nonce_included_in_record = 1; aead_ctx->random_variable_nonce = 1; aead_ctx->omit_length_in_ad = 1; aead_ctx->omit_version_in_ad = (version == SSL3_VERSION); } return aead_ctx; }
// aead_tls_open decrypts and authenticates a legacy MAC-then-encrypt TLS
// record. On success it writes the plaintext (MAC and padding stripped) to
// |out| and its length to |*out_len|, returning 1. Returns 0 with an error
// queued on failure.
//
// The code between padding removal and the final |good| check is
// timing-sensitive: it must not branch on, or leak, the padding validity or
// MAC location, to resist Lucky-13-style attacks.
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len, const uint8_t *ad,
                         size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    // Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  // The record must be at least large enough to hold a MAC.
  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    // This requires that the caller provide space for the MAC, even though it
    // will always be removed on return.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  // The AD is the 13-byte TLS header minus the 2-byte length, which is
  // appended below once the plaintext length is known.
  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    // EVP_CIPHER takes int as input.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  // Configure the explicit IV.
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  // Decrypt to get the plaintext + MAC + padding.
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);

  // Remove CBC padding. Code from here on is timing-sensitive with respect to
  // |padding_ok| and |data_plus_mac_len| for CBC ciphers.
  size_t data_plus_mac_len;
  crypto_word_t padding_ok;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    if (!EVP_tls_cbc_remove_padding(
            &padding_ok, &data_plus_mac_len, out, total,
            EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
            HMAC_size(&tls_ctx->hmac_ctx))) {
      // Publicly invalid. This can be rejected in non-constant time.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = CONSTTIME_TRUE_W;
    data_plus_mac_len = total;
    // |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
    // already been checked against the MAC size at the top of the function.
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  size_t data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  // At this point, if the padding is valid, the first |data_plus_mac_len| bytes
  // after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is
  // still large enough to extract a MAC, but it will be irrelevant.

  // To allow for CBC mode which changes cipher length, |ad| doesn't include the
  // length for legacy ciphers.
  uint8_t ad_fixed[13];
  OPENSSL_memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  // Compute the MAC and extract the one in the record.
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    // Constant-time CBC path: digest the record and copy out the claimed MAC
    // without leaking its (secret) offset.
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    // We should support the constant-time path for all CBC-mode ciphers
    // implemented.
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    unsigned mac_len_u;
    if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, out, data_len) ||
        !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len_u)) {
      return 0;
    }
    mac_len = mac_len_u;

    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  // Perform the MAC check and the padding check in constant-time. It should be
  // safe to simply perform the padding check first, but it would not be under a
  // different choice of MAC location on padding failure. See
  // EVP_tls_cbc_remove_padding.
  crypto_word_t good =
      constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
  good &= padding_ok;
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  // End of timing-sensitive code.

  *out_len = data_len;
  return 1;
}
static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, const size_t max_out_tag_len, const uint8_t *nonce, const size_t nonce_len, const uint8_t *in, const size_t in_len, const uint8_t *extra_in, const size_t extra_in_len, const uint8_t *ad, const size_t ad_len) { AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state; if (!tls_ctx->cipher_ctx.encrypt) { // Unlike a normal AEAD, a TLS AEAD may only be used in one direction. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); return 0; } if (in_len > INT_MAX) { // EVP_CIPHER takes int as input. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } if (max_out_tag_len < aead_tls_tag_len(ctx, in_len, extra_in_len)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); return 0; } if (ad_len != 13 - 2 /* length bytes */) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE); return 0; } // To allow for CBC mode which changes cipher length, |ad| doesn't include the // length for legacy ciphers. uint8_t ad_extra[2]; ad_extra[0] = (uint8_t)(in_len >> 8); ad_extra[1] = (uint8_t)(in_len & 0xff); // Compute the MAC. This must be first in case the operation is being done // in-place. uint8_t mac[EVP_MAX_MD_SIZE]; unsigned mac_len; if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) || !HMAC_Update(&tls_ctx->hmac_ctx, ad, ad_len) || !HMAC_Update(&tls_ctx->hmac_ctx, ad_extra, sizeof(ad_extra)) || !HMAC_Update(&tls_ctx->hmac_ctx, in, in_len) || !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len)) { return 0; } // Configure the explicit IV. if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE && !tls_ctx->implicit_iv && !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) { return 0; } // Encrypt the input. 
int len; if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) { return 0; } unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx); // Feed the MAC into the cipher in two steps. First complete the final partial // block from encrypting the input and split the result between |out| and // |out_tag|. Then feed the rest. const size_t early_mac_len = (block_size - (in_len % block_size) % block_size); if (early_mac_len != 0) { assert(len + block_size - early_mac_len == in_len); uint8_t buf[EVP_MAX_BLOCK_LENGTH]; int buf_len; if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, buf, &buf_len, mac, (int)early_mac_len)) { return 0; } assert(buf_len == (int)block_size); OPENSSL_memcpy(out + len, buf, block_size - early_mac_len); OPENSSL_memcpy(out_tag, buf + block_size - early_mac_len, early_mac_len); } size_t tag_len = early_mac_len; if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len, mac + tag_len, mac_len - tag_len)) { return 0; } tag_len += len; if (block_size > 1) { assert(block_size <= 256); assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE); // Compute padding and feed that into the cipher. uint8_t padding[256]; unsigned padding_len = block_size - ((in_len + mac_len) % block_size); OPENSSL_memset(padding, padding_len - 1, padding_len); if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len, padding, (int)padding_len)) { return 0; } tag_len += len; } if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len)) { return 0; } assert(len == 0); // Padding is explicit. assert(tag_len == aead_tls_tag_len(ctx, in_len, extra_in_len)); *out_tag_len = tag_len; return 1; }
/* tls13_set_traffic_key derives the key and IV for the given TLS 1.3 traffic
 * |type| and |direction| from |traffic_secret|, installs the resulting AEAD
 * on the read or write side of |ssl|, and records the traffic secret for
 * later key updates. Returns 1 on success and 0 on error. */
int tls13_set_traffic_key(SSL *ssl, enum tls_record_type_t type,
                          enum evp_aead_direction_t direction,
                          const uint8_t *traffic_secret,
                          size_t traffic_secret_len) {
  /* The secret length is stored in a uint8_t-sized field below. */
  if (traffic_secret_len > 0xff) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW);
    return 0;
  }

  /* Map the record type to the HKDF label prefix for this key schedule
   * phase. */
  const char *phase;
  switch (type) {
    case type_early_handshake:
      phase = "early handshake key expansion, ";
      break;
    case type_early_data:
      phase = "early application data key expansion, ";
      break;
    case type_handshake:
      phase = "handshake key expansion, ";
      break;
    case type_data:
      phase = "application data key expansion, ";
      break;
    default:
      return 0;
  }
  size_t phase_len = strlen(phase);

  /* The write direction of whichever peer we are selects the matching
   * purpose string. */
  const char *purpose = "client write key";
  if ((ssl->server && direction == evp_aead_seal) ||
      (!ssl->server && direction == evp_aead_open)) {
    purpose = "server write key";
  }
  size_t purpose_len = strlen(purpose);

  /* The longest label has length 38 (type_early_data) + 16 (either purpose
   * value). */
  uint8_t label[38 + 16];
  size_t label_len = phase_len + purpose_len;
  if (label_len > sizeof(label)) {
    assert(0);
    return 0;
  }
  memcpy(label, phase, phase_len);
  memcpy(label + phase_len, purpose, purpose_len);

  /* Look up cipher suite properties. */
  const EVP_AEAD *aead;
  const EVP_MD *digest = ssl_get_handshake_digest(ssl_get_algorithm_prf(ssl));
  size_t mac_secret_len, fixed_iv_len;
  if (!ssl_cipher_get_evp_aead(&aead, &mac_secret_len, &fixed_iv_len,
                               ssl->session->cipher,
                               ssl3_protocol_version(ssl))) {
    return 0;
  }

  /* Derive the key. */
  size_t key_len = EVP_AEAD_key_length(aead);
  uint8_t key[EVP_AEAD_MAX_KEY_LENGTH];
  if (!hkdf_expand_label(key, digest, traffic_secret, traffic_secret_len,
                         label, label_len, NULL, 0, key_len)) {
    return 0;
  }

  /* The IV's label ends in "iv" instead of "key": drop the final character
   * of "...key" and overwrite the last two of the shortened label. */
  if (label_len < 3) {
    assert(0);
    return 0;
  }
  label_len--;
  label[label_len - 2] = 'i';
  label[label_len - 1] = 'v';

  /* Derive the IV. */
  size_t iv_len = EVP_AEAD_nonce_length(aead);
  uint8_t iv[EVP_AEAD_MAX_NONCE_LENGTH];
  if (!hkdf_expand_label(iv, digest, traffic_secret, traffic_secret_len,
                         label, label_len, NULL, 0, iv_len)) {
    return 0;
  }

  SSL_AEAD_CTX *traffic_aead =
      SSL_AEAD_CTX_new(direction, ssl3_protocol_version(ssl),
                       ssl->session->cipher, key, key_len, NULL, 0, iv, iv_len);
  if (traffic_aead == NULL) {
    return 0;
  }

  /* Ownership of |traffic_aead| passes to the record layer. */
  if (direction == evp_aead_open) {
    if (!ssl->method->set_read_state(ssl, traffic_aead)) {
      return 0;
    }
  } else {
    if (!ssl->method->set_write_state(ssl, traffic_aead)) {
      return 0;
    }
  }

  /* Save the traffic secret. */
  if (direction == evp_aead_open) {
    memcpy(ssl->s3->read_traffic_secret, traffic_secret, traffic_secret_len);
    ssl->s3->read_traffic_secret_len = traffic_secret_len;
  } else {
    memcpy(ssl->s3->write_traffic_secret, traffic_secret, traffic_secret_len);
    ssl->s3->write_traffic_secret_len = traffic_secret_len;
  }

  return 1;
}
/* tls1_setup_key_block derives and caches the key block for the pending
 * cipher state of |s|: 2 * (key || MAC secret || fixed IV). It also enables
 * CBC record splitting where required. Returns 1 on success (or if the key
 * block already exists) and 0 on error. */
int tls1_setup_key_block(SSL *s) {
  uint8_t *p;
  const EVP_AEAD *aead = NULL;
  int ret = 0;
  size_t mac_secret_len, fixed_iv_len, variable_iv_len, key_len;
  size_t key_block_len;

  /* Already derived for this handshake; nothing to do. */
  if (s->s3->tmp.key_block_length != 0) {
    return 1;
  }

  if (s->session->cipher == NULL) {
    goto cipher_unavailable_err;
  }

  if (!ssl_cipher_get_evp_aead(&aead, &mac_secret_len, &fixed_iv_len,
                               s->session->cipher,
                               ssl3_version_from_wire(s, s->version))) {
    goto cipher_unavailable_err;
  }

  key_len = EVP_AEAD_key_length(aead);
  variable_iv_len = EVP_AEAD_nonce_length(aead);
  if (mac_secret_len > 0) {
    /* For "stateful" AEADs (i.e. compatibility with pre-AEAD cipher suites) the
     * key length reported by |EVP_AEAD_key_length| will include the MAC key
     * bytes and initial implicit IV. */
    if (key_len < mac_secret_len + fixed_iv_len) {
      OPENSSL_PUT_ERROR(SSL, tls1_setup_key_block, ERR_R_INTERNAL_ERROR);
      return 0;
    }
    key_len -= mac_secret_len + fixed_iv_len;
  } else {
    /* The nonce is split into a fixed portion and a variable portion. */
    if (variable_iv_len < fixed_iv_len) {
      OPENSSL_PUT_ERROR(SSL, tls1_setup_key_block, ERR_R_INTERNAL_ERROR);
      return 0;
    }
    variable_iv_len -= fixed_iv_len;
  }

  /* The lengths are stored in uint8_t fields below. */
  assert(mac_secret_len < 256);
  assert(fixed_iv_len < 256);
  assert(variable_iv_len < 256);

  s->s3->tmp.new_aead = aead;
  s->s3->tmp.new_mac_secret_len = (uint8_t)mac_secret_len;
  s->s3->tmp.new_fixed_iv_len = (uint8_t)fixed_iv_len;
  s->s3->tmp.new_variable_iv_len = (uint8_t)variable_iv_len;

  /* Two copies of everything: one for each direction. */
  key_block_len = key_len + mac_secret_len + fixed_iv_len;
  key_block_len *= 2;

  ssl3_cleanup_key_block(s);

  p = (uint8_t *)OPENSSL_malloc(key_block_len);
  if (p == NULL) {
    OPENSSL_PUT_ERROR(SSL, tls1_setup_key_block, ERR_R_MALLOC_FAILURE);
    goto err;
  }

  s->s3->tmp.key_block_length = key_block_len;
  s->s3->tmp.key_block = p;

  if (!tls1_generate_key_block(s, p, key_block_len)) {
    goto err;
  }

  if (!SSL_USE_EXPLICIT_IV(s) &&
      (s->mode & SSL_MODE_CBC_RECORD_SPLITTING) != 0) {
    /* enable vulnerability countermeasure for CBC ciphers with known-IV
     * problem (http://www.openssl.org/~bodo/tls-cbc.txt). RC4 is a stream
     * cipher and does not need the countermeasure. */
    s->s3->need_record_splitting = 1;

    if (s->session->cipher != NULL &&
        s->session->cipher->algorithm_enc == SSL_RC4) {
      s->s3->need_record_splitting = 0;
    }
  }

  ret = 1;

err:
  return ret;

cipher_unavailable_err:
  OPENSSL_PUT_ERROR(SSL, tls1_setup_key_block,
                    SSL_R_CIPHER_OR_HASH_UNAVAILABLE);
  return 0;
}
/* tls1_change_cipher_state_aead installs a new AEAD context for the read
 * (|is_read| non-zero) or write direction using the pending cipher state in
 * |s->s3->tmp|. For "stateful" AEADs that emulate pre-AEAD cipher suites, the
 * MAC secret, cipher key and IV are concatenated into a single AEAD key.
 * Returns 1 on success and 0 on error. */
static int tls1_change_cipher_state_aead(SSL *s, char is_read,
                                         const uint8_t *key, unsigned key_len,
                                         const uint8_t *iv, unsigned iv_len,
                                         const uint8_t *mac_secret,
                                         unsigned mac_secret_len) {
  const EVP_AEAD *aead = s->s3->tmp.new_aead;
  SSL_AEAD_CTX *aead_ctx;
  /* merged_key is used to merge the MAC, cipher, and IV keys for an AEAD which
   * simulates pre-AEAD cipher suites.
   * NOTE(review): merged_key holds secret key material but is never cleansed
   * before the function returns — consider OPENSSL_cleanse; confirm against
   * the project's conventions for stack-resident secrets. */
  uint8_t merged_key[EVP_AEAD_MAX_KEY_LENGTH];

  if (mac_secret_len > 0) {
    /* This is a "stateful" AEAD (for compatibility with pre-AEAD cipher
     * suites). Layout: MAC secret || cipher key || IV. */
    if (mac_secret_len + key_len + iv_len > sizeof(merged_key)) {
      OPENSSL_PUT_ERROR(SSL, tls1_change_cipher_state_aead,
                        ERR_R_INTERNAL_ERROR);
      return 0;
    }
    memcpy(merged_key, mac_secret, mac_secret_len);
    memcpy(merged_key + mac_secret_len, key, key_len);
    memcpy(merged_key + mac_secret_len + key_len, iv, iv_len);
    key = merged_key;
    key_len += mac_secret_len;
    key_len += iv_len;
  }

  if (is_read) {
    if (!tls1_aead_ctx_init(&s->aead_read_ctx)) {
      return 0;
    }
    aead_ctx = s->aead_read_ctx;
  } else {
    if (SSL_IS_DTLS(s) && s->aead_write_ctx != NULL) {
      /* DTLS renegotiation is unsupported, so a CCS can only switch away from
       * the NULL cipher. This simplifies renegotiation. */
      OPENSSL_PUT_ERROR(SSL, tls1_change_cipher_state_aead,
                        ERR_R_INTERNAL_ERROR);
      return 0;
    }
    if (!tls1_aead_ctx_init(&s->aead_write_ctx)) {
      return 0;
    }
    aead_ctx = s->aead_write_ctx;
  }

  if (!EVP_AEAD_CTX_init_with_direction(
          &aead_ctx->ctx, aead, key, key_len, EVP_AEAD_DEFAULT_TAG_LENGTH,
          is_read ? evp_aead_open : evp_aead_seal)) {
    /* On failure, free the context and clear the corresponding pointer so a
     * later cleanup does not see a half-initialized context. */
    OPENSSL_free(aead_ctx);
    if (is_read) {
      s->aead_read_ctx = NULL;
    } else {
      s->aead_write_ctx = NULL;
    }
    return 0;
  }

  if (mac_secret_len == 0) {
    /* For a real AEAD, the IV is the fixed part of the nonce. */
    if (iv_len > sizeof(aead_ctx->fixed_nonce)) {
      OPENSSL_PUT_ERROR(SSL, tls1_change_cipher_state_aead,
                        ERR_R_INTERNAL_ERROR);
      return 0;
    }
    memcpy(aead_ctx->fixed_nonce, iv, iv_len);
    aead_ctx->fixed_nonce_len = iv_len;
    aead_ctx->variable_nonce_included_in_record =
        (s->s3->tmp.new_cipher->algorithm2 &
         SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD) != 0;
    aead_ctx->random_variable_nonce = 0;
    aead_ctx->omit_length_in_ad = 0;
  } else {
    /* Legacy (stateful) mode: the variable nonce is random, carried in the
     * record, and the length is omitted from the additional data. */
    aead_ctx->fixed_nonce_len = 0;
    aead_ctx->variable_nonce_included_in_record = 1;
    aead_ctx->random_variable_nonce = 1;
    aead_ctx->omit_length_in_ad = 1;
  }
  aead_ctx->variable_nonce_len = s->s3->tmp.new_variable_iv_len;
  aead_ctx->omit_version_in_ad = (s->version == SSL3_VERSION);
  /* Sanity check: fixed + variable parts must exactly fill the AEAD nonce. */
  if (aead_ctx->variable_nonce_len + aead_ctx->fixed_nonce_len !=
      EVP_AEAD_nonce_length(aead)) {
    OPENSSL_PUT_ERROR(SSL, tls1_change_cipher_state_aead,
                      ERR_R_INTERNAL_ERROR);
    return 0;
  }
  aead_ctx->tag_len = EVP_AEAD_max_overhead(aead);

  return 1;
}
// BORINGSSL_self_test runs the FIPS power-on known-answer tests (KATs) and
// pairwise-consistency checks: AES-CBC, AES-GCM, 3DES, SHA-1/256/512,
// RSA sign/verify, ECDSA sign, and CTR-DRBG generate/reseed. Returns 1 if
// every test passes and 0 otherwise. The BORINGSSL_FIPS_BREAK_* macros
// deliberately corrupt one expected byte so the corresponding failure path
// can be exercised. (The "DBRG" spellings below are in test-vector inputs
// and log strings and must stay byte-for-byte as-is.)
int BORINGSSL_self_test(void) {
  static const uint8_t kAESKey[16] = "BoringCrypto Key";
  static const uint8_t kAESIV[16] = {0};
  static const uint8_t kPlaintext[64] =
      "BoringCryptoModule FIPS KAT Encryption and Decryption Plaintext!";
  static const uint8_t kAESCBCCiphertext[64] = {
      0x87, 0x2d, 0x98, 0xc2, 0xcc, 0x31, 0x5b, 0x41, 0xe0, 0xfa, 0x7b, 0x0a,
      0x71, 0xc0, 0x42, 0xbf, 0x4f, 0x61, 0xd0, 0x0d, 0x58, 0x8c, 0xf7, 0x05,
      0xfb, 0x94, 0x89, 0xd3, 0xbc, 0xaa, 0x1a, 0x50, 0x45, 0x1f, 0xc3, 0x8c,
      0xb8, 0x98, 0x86, 0xa3, 0xe3, 0x6c, 0xfc, 0xad, 0x3a, 0xb5, 0x59, 0x27,
      0x7d, 0x21, 0x07, 0xca, 0x4c, 0x1d, 0x55, 0x34, 0xdd, 0x5a, 0x2d, 0xc4,
      0xb4, 0xf5, 0xa8,
#if !defined(BORINGSSL_FIPS_BREAK_AES_CBC)
      0x35
#else
      0x00
#endif
  };
  static const uint8_t kAESGCMCiphertext[80] = {
      0x4a, 0xd8, 0xe7, 0x7d, 0x78, 0xd7, 0x7d, 0x5e, 0xb2, 0x11, 0xb6, 0xc9,
      0xa4, 0xbc, 0xb2, 0xae, 0xbe, 0x93, 0xd1, 0xb7, 0xfe, 0x65, 0xc1, 0x82,
      0x2a, 0xb6, 0x71, 0x5f, 0x1a, 0x7c, 0xe0, 0x1b, 0x2b, 0xe2, 0x53, 0xfa,
      0xa0, 0x47, 0xfa, 0xd7, 0x8f, 0xb1, 0x4a, 0xc4, 0xdc, 0x89, 0xf9, 0xb4,
      0x14, 0x4d, 0xde, 0x95, 0xea, 0x29, 0x69, 0x76, 0x81, 0xa3, 0x5c, 0x33,
      0xd8, 0x37, 0xd8, 0xfa, 0x47, 0x19, 0x46, 0x2f, 0xf1, 0x90, 0xb7, 0x61,
      0x8f, 0x6f, 0xdd, 0x31, 0x3f, 0x6a, 0x64,
#if !defined(BORINGSSL_FIPS_BREAK_AES_GCM)
      0x0d
#else
      0x00
#endif
  };
  static const DES_cblock kDESKey1 = {"BCMDESK1"};
  static const DES_cblock kDESKey2 = {"BCMDESK2"};
  static const DES_cblock kDESKey3 = {"BCMDESK3"};
  static const DES_cblock kDESIV = {"BCMDESIV"};
  static const uint8_t kDESCiphertext[64] = {
      0xa4, 0x30, 0x7a, 0x4c, 0x1f, 0x60, 0x16, 0xd7, 0x4f, 0x41, 0xe1, 0xbb,
      0x27, 0xc4, 0x27, 0x37, 0xd4, 0x7f, 0xb9, 0x10, 0xf8, 0xbc, 0xaf, 0x93,
      0x91, 0xb8, 0x88, 0x24, 0xb1, 0xf6, 0xf8, 0xbd, 0x31, 0x96, 0x06, 0x76,
      0xde, 0x32, 0xcd, 0x29, 0x29, 0xba, 0x70, 0x5f, 0xea, 0xc0, 0xcb, 0xde,
      0xc7, 0x75, 0x90, 0xe0, 0x0f, 0x5e, 0x2c, 0x0d, 0x49, 0x20, 0xd5, 0x30,
      0x83, 0xf8, 0x08,
#if !defined(BORINGSSL_FIPS_BREAK_DES)
      0x5a
#else
      0x00
#endif
  };
  static const uint8_t kPlaintextSHA1[20] = {
      0xc6, 0xf8, 0xc9, 0x63, 0x1c, 0x14, 0x23, 0x62, 0x9b, 0xbd, 0x55, 0x82,
      0xf4, 0xd6, 0x1d, 0xf2, 0xab, 0x7d, 0xc8,
#if !defined(BORINGSSL_FIPS_BREAK_SHA_1)
      0x28
#else
      0x00
#endif
  };
  static const uint8_t kPlaintextSHA256[32] = {
      0x37, 0xbd, 0x70, 0x53, 0x72, 0xfc, 0xd4, 0x03, 0x79, 0x70, 0xfb, 0x06,
      0x95, 0xb1, 0x2a, 0x82, 0x48, 0xe1, 0x3e, 0xf2, 0x33, 0xfb, 0xef, 0x29,
      0x81, 0x22, 0x45, 0x40, 0x43, 0x70, 0xce,
#if !defined(BORINGSSL_FIPS_BREAK_SHA_256)
      0x0f
#else
      0x00
#endif
  };
  static const uint8_t kPlaintextSHA512[64] = {
      0x08, 0x6a, 0x1c, 0x84, 0x61, 0x9d, 0x8e, 0xb3, 0xc0, 0x97, 0x4e, 0xa1,
      0x9f, 0x9c, 0xdc, 0xaf, 0x3b, 0x5c, 0x31, 0xf0, 0xf2, 0x74, 0xc3, 0xbd,
      0x6e, 0xd6, 0x1e, 0xb2, 0xbb, 0x34, 0x74, 0x72, 0x5c, 0x51, 0x29, 0x8b,
      0x87, 0x3a, 0xa3, 0xf2, 0x25, 0x23, 0xd4, 0x1c, 0x82, 0x1b, 0xfe, 0xd3,
      0xc6, 0xee, 0xb5, 0xd6, 0xaf, 0x07, 0x7b, 0x98, 0xca, 0xa7, 0x01, 0xf3,
      0x94, 0xf3, 0x68,
#if !defined(BORINGSSL_FIPS_BREAK_SHA_512)
      0x14
#else
      0x00
#endif
  };
  static const uint8_t kRSASignature[256] = {
      0x62, 0x66, 0x4b, 0xe3, 0xb1, 0xd2, 0x83, 0xf1, 0xa8, 0x56, 0x2b, 0x33,
      0x60, 0x1e, 0xdb, 0x1e, 0x06, 0xf7, 0xa7, 0x1e, 0xa8, 0xef, 0x03, 0x4d,
      0x0c, 0xf6, 0x83, 0x75, 0x7a, 0xf0, 0x14, 0xc7, 0xe2, 0x94, 0x3a, 0xb5,
      0x67, 0x56, 0xa5, 0x48, 0x7f, 0x3a, 0xa5, 0xbf, 0xf7, 0x1d, 0x44, 0xa6,
      0x34, 0xed, 0x9b, 0xd6, 0x51, 0xaa, 0x2c, 0x4e, 0xce, 0x60, 0x5f, 0xe9,
      0x0e, 0xd5, 0xcd, 0xeb, 0x23, 0x27, 0xf8, 0xfb, 0x45, 0xe5, 0x34, 0x63,
      0x77, 0x7f, 0x2e, 0x80, 0xcf, 0x9d, 0x2e, 0xfc, 0xe2, 0x50, 0x75, 0x29,
      0x46, 0xf4, 0xaf, 0x91, 0xed, 0x36, 0xe1, 0x5e, 0xef, 0x66, 0xa1, 0xff,
      0x27, 0xfc, 0x87, 0x7e, 0x60, 0x84, 0x0f, 0x54, 0x51, 0x56, 0x0f, 0x68,
      0x99, 0xc0, 0x3f, 0xeb, 0xa5, 0xa0, 0x46, 0xb0, 0x86, 0x02, 0xb0, 0xc8,
      0xe8, 0x46, 0x13, 0x06, 0xcd, 0xb7, 0x8a, 0xd0, 0x3b, 0x46, 0xd0, 0x14,
      0x64, 0x53, 0x9b, 0x5b, 0x5e, 0x02, 0x45, 0xba, 0x6e, 0x7e, 0x0a, 0xb9,
      0x9e, 0x62, 0xb7, 0xd5, 0x7a, 0x87, 0xea, 0xd3, 0x24, 0xa5, 0xef, 0xb3,
      0xdc, 0x05, 0x9c, 0x04, 0x60, 0x4b, 0xde, 0xa8, 0x90, 0x08, 0x7b, 0x6a,
      0x5f, 0xb4, 0x3f, 0xda, 0xc5, 0x1f, 0x6e, 0xd6, 0x15, 0xde, 0x65, 0xa4,
      0x6e, 0x62, 0x9d, 0x8f, 0xa8, 0xbe, 0x86, 0xf6, 0x09, 0x90, 0x40, 0xa5,
      0xf4, 0x23, 0xc5, 0xf6, 0x38, 0x86, 0x0d, 0x1c, 0xed, 0x4a, 0x0a, 0xae,
      0xa4, 0x26, 0xc2, 0x2e, 0xd3, 0x13, 0x66, 0x61, 0xea, 0x35, 0x01, 0x0e,
      0x13, 0xda, 0x78, 0x20, 0xae, 0x59, 0x5f, 0x9b, 0xa9, 0x6c, 0xf9, 0x1b,
      0xdf, 0x76, 0x53, 0xc8, 0xa7, 0xf5, 0x63, 0x6d, 0xf3, 0xff, 0xfd, 0xaf,
      0x75, 0x4b, 0xac, 0x67, 0xb1, 0x3c, 0xbf, 0x5e, 0xde, 0x73, 0x02, 0x6d,
      0xd2, 0x0c, 0xb1,
#if !defined(BORINGSSL_FIPS_BREAK_RSA_SIG)
      0x64
#else
      0x00
#endif
  };
  const uint8_t kDRBGEntropy[48] =
      "BCM Known Answer Test DBRG Initial Entropy      ";
  const uint8_t kDRBGPersonalization[18] = "BCMPersonalization";
  const uint8_t kDRBGAD[16] = "BCM DRBG KAT AD ";
  const uint8_t kDRBGOutput[64] = {
      0x1d, 0x63, 0xdf, 0x05, 0x51, 0x49, 0x22, 0x46, 0xcd, 0x9b, 0xc5, 0xbb,
      0xf1, 0x5d, 0x44, 0xae, 0x13, 0x78, 0xb1, 0xe4, 0x7c, 0xf1, 0x96, 0x33,
      0x3d, 0x60, 0xb6, 0x29, 0xd4, 0xbb, 0x6b, 0x44, 0xf9, 0xef, 0xd9, 0xf4,
      0xa2, 0xba, 0x48, 0xea, 0x39, 0x75, 0x59, 0x32, 0xf7, 0x31, 0x2c, 0x98,
      0x14, 0x2b, 0x49, 0xdf, 0x02, 0xb6, 0x5d, 0x71, 0x09, 0x50, 0xdb, 0x23,
      0xdb, 0xe5, 0x22,
#if !defined(BORINGSSL_FIPS_BREAK_DRBG)
      0x95
#else
      0x00
#endif
  };
  const uint8_t kDRBGEntropy2[48] =
      "BCM Known Answer Test DBRG Reseed Entropy       ";
  const uint8_t kDRBGReseedOutput[64] = {
      0xa4, 0x77, 0x05, 0xdb, 0x14, 0x11, 0x76, 0x71, 0x42, 0x5b, 0xd8, 0xd7,
      0xa5, 0x4f, 0x8b, 0x39, 0xf2, 0x10, 0x4a, 0x50, 0x5b, 0xa2, 0xc8, 0xf0,
      0xbb, 0x3e, 0xa1, 0xa5, 0x90, 0x7d, 0x54, 0xd9, 0xc6, 0xb0, 0x96, 0xc0,
      0x2b, 0x7e, 0x9b, 0xc9, 0xa1, 0xdd, 0x78, 0x2e, 0xd5, 0xa8, 0x66, 0x16,
      0xbd, 0x18, 0x3c, 0xf2, 0xaa, 0x7a, 0x2b, 0x37, 0xf9, 0xab, 0x35, 0x64,
      0x15, 0x01, 0x3f, 0xc4,
  };
  const uint8_t kECDSASigR[32] = {
      0x67, 0x80, 0xc5, 0xfc, 0x70, 0x27, 0x5e, 0x2c, 0x70, 0x61, 0xa0, 0xe7,
      0x87, 0x7b, 0xb1, 0x74, 0xde, 0xad, 0xeb, 0x98, 0x87, 0x02, 0x7f, 0x3f,
      0xa8, 0x36, 0x54, 0x15, 0x8b, 0xa7, 0xf5,
#if !defined(BORINGSSL_FIPS_BREAK_ECDSA_SIG)
      0x0c,
#else
      0x00,
#endif
  };
  const uint8_t kECDSASigS[32] = {
      0xa5, 0x93, 0xe0, 0x23, 0x91, 0xe7, 0x4b, 0x8d, 0x77, 0x25, 0xa6, 0xba,
      0x4d, 0xd9, 0x86, 0x77, 0xda, 0x7d, 0x8f, 0xef, 0xc4, 0x1a, 0xf0, 0xcc,
      0x81, 0xe5, 0xea, 0x3f, 0xc2, 0x41, 0x7f, 0xd8,
  };

  EVP_AEAD_CTX aead_ctx;
  EVP_AEAD_CTX_zero(&aead_ctx);
  RSA *rsa_key = NULL;
  EC_KEY *ec_key = NULL;
  ECDSA_SIG *sig = NULL;
  int ret = 0;

  AES_KEY aes_key;
  uint8_t aes_iv[16];
  uint8_t output[256];

  // AES-CBC Encryption KAT
  memcpy(aes_iv, kAESIV, sizeof(kAESIV));
  if (AES_set_encrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) {
    goto err;
  }
  AES_cbc_encrypt(kPlaintext, output, sizeof(kPlaintext), &aes_key, aes_iv,
                  AES_ENCRYPT);
  if (!check_test(kAESCBCCiphertext, output, sizeof(kAESCBCCiphertext),
                  "AES-CBC Encryption KAT")) {
    goto err;
  }

  // AES-CBC Decryption KAT
  // (The IV buffer is re-seeded because AES_cbc_encrypt mutates it.)
  memcpy(aes_iv, kAESIV, sizeof(kAESIV));
  if (AES_set_decrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) {
    goto err;
  }
  AES_cbc_encrypt(kAESCBCCiphertext, output, sizeof(kAESCBCCiphertext),
                  &aes_key, aes_iv, AES_DECRYPT);
  if (!check_test(kPlaintext, output, sizeof(kPlaintext),
                  "AES-CBC Decryption KAT")) {
    goto err;
  }

  size_t out_len;
  uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH];
  OPENSSL_memset(nonce, 0, sizeof(nonce));
  if (!EVP_AEAD_CTX_init(&aead_ctx, EVP_aead_aes_128_gcm(), kAESKey,
                         sizeof(kAESKey), 0, NULL)) {
    goto err;
  }

  // AES-GCM Encryption KAT
  if (!EVP_AEAD_CTX_seal(&aead_ctx, output, &out_len, sizeof(output), nonce,
                         EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()),
                         kPlaintext, sizeof(kPlaintext), NULL, 0) ||
      !check_test(kAESGCMCiphertext, output, sizeof(kAESGCMCiphertext),
                  "AES-GCM Encryption KAT")) {
    goto err;
  }

  // AES-GCM Decryption KAT
  if (!EVP_AEAD_CTX_open(&aead_ctx, output, &out_len, sizeof(output), nonce,
                         EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()),
                         kAESGCMCiphertext, sizeof(kAESGCMCiphertext), NULL,
                         0) ||
      !check_test(kPlaintext, output, sizeof(kPlaintext),
                  "AES-GCM Decryption KAT")) {
    goto err;
  }

  DES_key_schedule des1, des2, des3;
  DES_cblock des_iv;
  DES_set_key(&kDESKey1, &des1);
  DES_set_key(&kDESKey2, &des2);
  DES_set_key(&kDESKey3, &des3);

  // 3DES Encryption KAT
  memcpy(&des_iv, &kDESIV, sizeof(des_iv));
  DES_ede3_cbc_encrypt(kPlaintext, output, sizeof(kPlaintext), &des1, &des2,
                       &des3, &des_iv, DES_ENCRYPT);
  if (!check_test(kDESCiphertext, output, sizeof(kDESCiphertext),
                  "3DES Encryption KAT")) {
    goto err;
  }

  // 3DES Decryption KAT
  memcpy(&des_iv, &kDESIV, sizeof(des_iv));
  DES_ede3_cbc_encrypt(kDESCiphertext, output, sizeof(kDESCiphertext), &des1,
                       &des2, &des3, &des_iv, DES_DECRYPT);
  if (!check_test(kPlaintext, output, sizeof(kPlaintext),
                  "3DES Decryption KAT")) {
    goto err;
  }

  // SHA-1 KAT
  SHA1(kPlaintext, sizeof(kPlaintext), output);
  if (!check_test(kPlaintextSHA1, output, sizeof(kPlaintextSHA1),
                  "SHA-1 KAT")) {
    goto err;
  }

  // SHA-256 KAT
  SHA256(kPlaintext, sizeof(kPlaintext), output);
  if (!check_test(kPlaintextSHA256, output, sizeof(kPlaintextSHA256),
                  "SHA-256 KAT")) {
    goto err;
  }

  // SHA-512 KAT
  SHA512(kPlaintext, sizeof(kPlaintext), output);
  if (!check_test(kPlaintextSHA512, output, sizeof(kPlaintextSHA512),
                  "SHA-512 KAT")) {
    goto err;
  }

  rsa_key = self_test_rsa_key();
  if (rsa_key == NULL) {
    fprintf(stderr, "RSA KeyGen failed\n");
    goto err;
  }

  // RSA Sign KAT
  unsigned sig_len;

  // Disable blinding for the power-on tests because it's not needed and
  // triggers an entropy draw.
  rsa_key->flags |= RSA_FLAG_NO_BLINDING;

  if (!RSA_sign(NID_sha256, kPlaintextSHA256, sizeof(kPlaintextSHA256), output,
                &sig_len, rsa_key) ||
      !check_test(kRSASignature, output, sizeof(kRSASignature),
                  "RSA Sign KAT")) {
    goto err;
  }

  // RSA Verify KAT
  if (!RSA_verify(NID_sha256, kPlaintextSHA256, sizeof(kPlaintextSHA256),
                  kRSASignature, sizeof(kRSASignature), rsa_key)) {
    fprintf(stderr, "RSA Verify KAT failed.\n");
    goto err;
  }

  ec_key = self_test_ecdsa_key();
  if (ec_key == NULL) {
    fprintf(stderr, "ECDSA KeyGen failed\n");
    goto err;
  }

  // ECDSA Sign/Verify PWCT
  // The 'k' value for ECDSA is fixed to avoid an entropy draw.
  ec_key->fixed_k = BN_new();
  if (ec_key->fixed_k == NULL || !BN_set_word(ec_key->fixed_k, 42)) {
    fprintf(stderr, "Out of memory\n");
    goto err;
  }
  sig = ECDSA_do_sign(kPlaintextSHA256, sizeof(kPlaintextSHA256), ec_key);

  uint8_t ecdsa_r_bytes[sizeof(kECDSASigR)];
  uint8_t ecdsa_s_bytes[sizeof(kECDSASigS)];
  if (sig == NULL ||
      BN_num_bytes(sig->r) != sizeof(ecdsa_r_bytes) ||
      !BN_bn2bin(sig->r, ecdsa_r_bytes) ||
      BN_num_bytes(sig->s) != sizeof(ecdsa_s_bytes) ||
      !BN_bn2bin(sig->s, ecdsa_s_bytes) ||
      !check_test(kECDSASigR, ecdsa_r_bytes, sizeof(kECDSASigR), "ECDSA R") ||
      !check_test(kECDSASigS, ecdsa_s_bytes, sizeof(kECDSASigS), "ECDSA S")) {
    fprintf(stderr, "ECDSA KAT failed.\n");
    goto err;
  }

  // DBRG KAT
  CTR_DRBG_STATE drbg;
  if (!CTR_DRBG_init(&drbg, kDRBGEntropy, kDRBGPersonalization,
                     sizeof(kDRBGPersonalization)) ||
      !CTR_DRBG_generate(&drbg, output, sizeof(kDRBGOutput), kDRBGAD,
                         sizeof(kDRBGAD)) ||
      !check_test(kDRBGOutput, output, sizeof(kDRBGOutput),
                  "DBRG Generate KAT") ||
      !CTR_DRBG_reseed(&drbg, kDRBGEntropy2, kDRBGAD, sizeof(kDRBGAD)) ||
      !CTR_DRBG_generate(&drbg, output, sizeof(kDRBGReseedOutput), kDRBGAD,
                         sizeof(kDRBGAD)) ||
      !check_test(kDRBGReseedOutput, output, sizeof(kDRBGReseedOutput),
                  "DRBG Reseed KAT")) {
    goto err;
  }
  // CTR_DRBG_clear must leave the state all-zero; verified against a zeroed
  // struct below.
  CTR_DRBG_clear(&drbg);

  CTR_DRBG_STATE kZeroDRBG;
  memset(&kZeroDRBG, 0, sizeof(kZeroDRBG));
  if (!check_test(&kZeroDRBG, &drbg, sizeof(drbg), "DRBG Clear KAT")) {
    goto err;
  }

  ret = 1;

err:
  EVP_AEAD_CTX_cleanup(&aead_ctx);
  RSA_free(rsa_key);
  EC_KEY_free(ec_key);
  ECDSA_SIG_free(sig);

  return ret;
}