int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
                              size_t *md_out_size, const uint8_t header[13],
                              const uint8_t *data, size_t data_plus_mac_size,
                              size_t data_plus_mac_plus_padding_size,
                              const uint8_t *mac_secret,
                              unsigned mac_secret_length) {
  HASH_CTX md_state;
  void (*md_final_raw)(HASH_CTX *ctx, uint8_t *md_out);
  void (*md_transform)(HASH_CTX *ctx, const uint8_t *block);
  unsigned md_size, md_block_size = 64;
  /* md_length_size is the number of bytes in the length field that terminates
   * the hash. */
  unsigned md_length_size = 8;

  /* Bound the acceptable input so we can forget about many possible overflows
   * later in this function. This is redundant with the record size limits in
   * TLS. */
  if (data_plus_mac_plus_padding_size >= 1024 * 1024) {
    assert(0);
    return 0;
  }

  switch (EVP_MD_type(md)) {
    case NID_sha1:
      SHA1_Init(&md_state.sha1);
      md_final_raw = tls1_sha1_final_raw;
      md_transform = tls1_sha1_transform;
      md_size = SHA_DIGEST_LENGTH;
      break;

    case NID_sha256:
      SHA256_Init(&md_state.sha256);
      md_final_raw = tls1_sha256_final_raw;
      md_transform = tls1_sha256_transform;
      md_size = SHA256_DIGEST_LENGTH;
      break;

    case NID_sha384:
      SHA384_Init(&md_state.sha512);
      md_final_raw = tls1_sha512_final_raw;
      md_transform = tls1_sha512_transform;
      md_size = SHA384_DIGEST_LENGTH;
      md_block_size = 128;
      md_length_size = 16;
      break;

    default:
      /* EVP_tls_cbc_record_digest_supported should have been called first to
       * check that the hash function is supported. */
      assert(0);
      *md_out_size = 0;
      return 0;
  }

  assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES);
  assert(md_block_size <= MAX_HASH_BLOCK_SIZE);
  assert(md_size <= EVP_MAX_MD_SIZE);

  static const size_t kHeaderLength = 13;

  /* kVarianceBlocks is the number of blocks of the hash that we have to
   * calculate in constant time because they could be altered by the
   * padding value.
   *
   * TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
   * required to be minimal. Therefore we say that the final six blocks
   * can vary based on the padding. */
  static const size_t kVarianceBlocks = 6;

  /* From now on we're dealing with the MAC, which conceptually has 13
   * bytes of `header' before the start of the data. */
  size_t len = data_plus_mac_plus_padding_size + kHeaderLength;
  /* max_mac_bytes contains the maximum number of bytes in the MAC, including
   * |header|, assuming that there's no padding. */
  size_t max_mac_bytes = len - md_size - 1;
  /* num_blocks is the maximum number of hash blocks. */
  size_t num_blocks =
      (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size;
  /* In order to calculate the MAC in constant time we have to handle
   * the final blocks specially because the padding value could cause the
   * end to appear somewhere in the final |kVarianceBlocks| blocks and we
   * can't leak where. However, |num_starting_blocks| worth of data can
   * be hashed right away because no padding value can affect whether
   * they are plaintext. */
  size_t num_starting_blocks = 0;
  /* k is the starting byte offset into the conceptual header||data where
   * we start processing. */
  size_t k = 0;
  /* mac_end_offset is the index just past the end of the data to be
   * MACed. */
  size_t mac_end_offset = data_plus_mac_size + kHeaderLength - md_size;
  /* c is the index of the 0x80 byte in the final hash block that
   * contains application data. */
  size_t c = mac_end_offset % md_block_size;
  /* index_a is the hash block number that contains the 0x80 terminating
   * value. */
  size_t index_a = mac_end_offset / md_block_size;
  /* index_b is the hash block number that contains the 64-bit hash
   * length, in bits. */
  size_t index_b = (mac_end_offset + md_length_size) / md_block_size;

  if (num_blocks > kVarianceBlocks) {
    num_starting_blocks = num_blocks - kVarianceBlocks;
    k = md_block_size * num_starting_blocks;
  }

  /* bits is the hash-length in bits. It includes the additional hash
   * block for the masked HMAC key. */
  size_t bits = 8 * mac_end_offset; /* at most 18 bits to represent */

  /* Compute the initial HMAC block. */
  bits += 8 * md_block_size;
  /* hmac_pad is the masked HMAC key. */
  uint8_t hmac_pad[MAX_HASH_BLOCK_SIZE];
  OPENSSL_memset(hmac_pad, 0, md_block_size);
  assert(mac_secret_length <= sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < md_block_size; i++) {
    hmac_pad[i] ^= 0x36;
  }

  md_transform(&md_state, hmac_pad);

  /* The length check means |bits| fits in four bytes. */
  uint8_t length_bytes[MAX_HASH_BIT_COUNT_BYTES];
  OPENSSL_memset(length_bytes, 0, md_length_size - 4);
  length_bytes[md_length_size - 4] = (uint8_t)(bits >> 24);
  length_bytes[md_length_size - 3] = (uint8_t)(bits >> 16);
  length_bytes[md_length_size - 2] = (uint8_t)(bits >> 8);
  length_bytes[md_length_size - 1] = (uint8_t)bits;

  if (k > 0) {
    /* k is a multiple of md_block_size. */
    uint8_t first_block[MAX_HASH_BLOCK_SIZE];
    OPENSSL_memcpy(first_block, header, 13);
    OPENSSL_memcpy(first_block + 13, data, md_block_size - 13);
    md_transform(&md_state, first_block);
    for (size_t i = 1; i < k / md_block_size; i++) {
      md_transform(&md_state, data + md_block_size * i - 13);
    }
  }

  uint8_t mac_out[EVP_MAX_MD_SIZE];
  OPENSSL_memset(mac_out, 0, sizeof(mac_out));

  /* We now process the final hash blocks. For each block, we construct
   * it in constant time. If |i == index_a| then we'll include the 0x80
   * byte and zero pad etc. For each block we selectively copy it, in
   * constant time, to |mac_out|. */
  for (size_t i = num_starting_blocks;
       i <= num_starting_blocks + kVarianceBlocks; i++) {
    uint8_t block[MAX_HASH_BLOCK_SIZE];
    uint8_t is_block_a = constant_time_eq_8(i, index_a);
    uint8_t is_block_b = constant_time_eq_8(i, index_b);
    for (size_t j = 0; j < md_block_size; j++) {
      uint8_t b = 0;
      if (k < kHeaderLength) {
        b = header[k];
      } else if (k < data_plus_mac_plus_padding_size + kHeaderLength) {
        b = data[k - kHeaderLength];
      }
      k++;

      uint8_t is_past_c = is_block_a & constant_time_ge_8(j, c);
      uint8_t is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1);
      /* If this is the block containing the end of the
       * application data, and we are at the offset for the
       * 0x80 value, then overwrite b with 0x80. */
      b = constant_time_select_8(is_past_c, 0x80, b);
      /* If this is the block containing the end of the
       * application data and we're past the 0x80 value then
       * just write zero. */
      b = b & ~is_past_cp1;
      /* If this is index_b (the final block), but not
       * index_a (the end of the data), then the 64-bit
       * length didn't fit into index_a and we're having to
       * add an extra block of zeros. */
      b &= ~is_block_b | is_block_a;

      /* The final bytes of one of the blocks contain the
       * length. */
      if (j >= md_block_size - md_length_size) {
        /* If this is index_b, write a length byte. */
        b = constant_time_select_8(
            is_block_b, length_bytes[j - (md_block_size - md_length_size)], b);
      }
      block[j] = b;
    }

    md_transform(&md_state, block);
    md_final_raw(&md_state, block);
    /* If this is index_b, copy the hash value to |mac_out|. */
    for (size_t j = 0; j < md_size; j++) {
      mac_out[j] |= block[j] & is_block_b;
    }
  }

  EVP_MD_CTX md_ctx;
  EVP_MD_CTX_init(&md_ctx);
  if (!EVP_DigestInit_ex(&md_ctx, md, NULL /* engine */)) {
    EVP_MD_CTX_cleanup(&md_ctx);
    return 0;
  }

  /* Complete the HMAC in the standard manner. */
  for (size_t i = 0; i < md_block_size; i++) {
    hmac_pad[i] ^= 0x6a;
  }

  EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size);
  EVP_DigestUpdate(&md_ctx, mac_out, md_size);
  unsigned md_out_size_u;
  EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u);
  *md_out_size = md_out_size_u;
  EVP_MD_CTX_cleanup(&md_ctx);

  return 1;
}
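
/* Illustrative sketch, not part of this file's API: despite the elaborate
 * block-by-block construction above, the value computed is simply
 * HMAC(mac_secret, header || data[0 .. data_plus_mac_size - md_size)); the
 * complexity exists only so that the timing does not depend on the padding
 * length. Assuming <openssl/hmac.h> and <openssl/mem.h> are available, a
 * check of that equivalence might look like the helper below. The function
 * name |example_check_against_plain_hmac| is hypothetical. */
static int example_check_against_plain_hmac(
    const EVP_MD *md, const uint8_t header[13], const uint8_t *data,
    size_t data_plus_mac_size, size_t data_plus_mac_plus_padding_size,
    const uint8_t *mac_secret, unsigned mac_secret_length) {
  uint8_t expected[EVP_MAX_MD_SIZE], got[EVP_MAX_MD_SIZE];
  unsigned expected_len;
  size_t got_len;
  size_t data_len = data_plus_mac_size - EVP_MD_size(md);
  int ok = 0;

  /* Build the conceptual header || data input that the constant-time code
   * above processes implicitly. */
  uint8_t *hmac_input = OPENSSL_malloc(13 + data_len);
  if (hmac_input == NULL) {
    return 0;
  }
  OPENSSL_memcpy(hmac_input, header, 13);
  OPENSSL_memcpy(hmac_input + 13, data, data_len);

  if (HMAC(md, mac_secret, mac_secret_length, hmac_input, 13 + data_len,
           expected, &expected_len) != NULL &&
      EVP_tls_cbc_digest_record(md, got, &got_len, header, data,
                                data_plus_mac_size,
                                data_plus_mac_plus_padding_size, mac_secret,
                                mac_secret_length)) {
    /* The two computations should agree for every valid padding length. */
    ok = got_len == expected_len && memcmp(got, expected, got_len) == 0;
  }

  OPENSSL_free(hmac_input);
  return ok;
}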
void EVP_tls_cbc_copy_mac(uint8_t *out, unsigned md_size, const uint8_t *in,
                          unsigned in_len, unsigned orig_len) {
#if defined(CBC_MAC_ROTATE_IN_PLACE)
  uint8_t rotated_mac_buf[64 + EVP_MAX_MD_SIZE];
  uint8_t *rotated_mac;
#else
  uint8_t rotated_mac[EVP_MAX_MD_SIZE];
#endif

  /* mac_end is the index of |in| just after the end of the MAC. */
  unsigned mac_end = in_len;
  unsigned mac_start = mac_end - md_size;
  /* scan_start contains the number of bytes that we can ignore because
   * the MAC's position can only vary by 255 bytes. */
  unsigned scan_start = 0;
  unsigned i, j;
  unsigned div_spoiler;
  unsigned rotate_offset;

  assert(orig_len >= in_len);
  assert(in_len >= md_size);
  assert(md_size <= EVP_MAX_MD_SIZE);

#if defined(CBC_MAC_ROTATE_IN_PLACE)
  rotated_mac = rotated_mac_buf + ((0 - (size_t)rotated_mac_buf) & 63);
#endif

  /* This information is public so it's safe to branch based on it. */
  if (orig_len > md_size + 255 + 1) {
    scan_start = orig_len - (md_size + 255 + 1);
  }

  /* div_spoiler contains a multiple of md_size that is used to cause the
   * modulo operation to be constant time. Without this, the time varies
   * based on the amount of padding when running on Intel chips at least.
   *
   * The aim of right-shifting md_size is so that the compiler doesn't
   * figure out that it can remove div_spoiler as that would require it
   * to prove that md_size is always even, which I hope is beyond it. */
  div_spoiler = md_size >> 1;
  div_spoiler <<= (sizeof(div_spoiler) - 1) * 8;
  rotate_offset = (div_spoiler + mac_start - scan_start) % md_size;

  memset(rotated_mac, 0, md_size);
  for (i = scan_start, j = 0; i < orig_len; i++) {
    uint8_t mac_started = constant_time_ge_8(i, mac_start);
    uint8_t mac_ended = constant_time_ge_8(i, mac_end);
    uint8_t b = in[i];
    rotated_mac[j++] |= b & mac_started & ~mac_ended;
    j &= constant_time_lt(j, md_size);
  }

  /* Now rotate the MAC */
#if defined(CBC_MAC_ROTATE_IN_PLACE)
  j = 0;
  for (i = 0; i < md_size; i++) {
    /* in case cache-line is 32 bytes, touch second line */
    ((volatile uint8_t *)rotated_mac)[rotate_offset ^ 32];
    out[j++] = rotated_mac[rotate_offset++];
    rotate_offset &= constant_time_lt(rotate_offset, md_size);
  }
#else
  memset(out, 0, md_size);
  rotate_offset = md_size - rotate_offset;
  rotate_offset &= constant_time_lt(rotate_offset, md_size);
  for (i = 0; i < md_size; i++) {
    for (j = 0; j < md_size; j++) {
      out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset);
    }
    rotate_offset++;
    rotate_offset &= constant_time_lt(rotate_offset, md_size);
  }
#endif
}
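
/* Illustrative sketch, not part of this file's API: one way a CBC record
 * decryption path might combine the two functions above to verify a record
 * MAC without leaking the padding length through timing. The helper name
 * |example_verify_record_mac| and its exact parameters are hypothetical;
 * a real caller derives |data_plus_mac_len| and the 13-byte pseudo-header
 * from the decrypted record and the sequence number. CRYPTO_memcmp is the
 * usual constant-time comparison helper. */
static int example_verify_record_mac(const EVP_MD *md,
                                     const uint8_t header[13],
                                     const uint8_t *record, size_t record_len,
                                     size_t data_plus_mac_len,
                                     const uint8_t *mac_secret,
                                     unsigned mac_secret_len) {
  unsigned md_size = (unsigned)EVP_MD_size(md);
  uint8_t record_mac[EVP_MAX_MD_SIZE], computed_mac[EVP_MAX_MD_SIZE];
  size_t computed_mac_len;

  /* Extract the MAC transmitted at the end of the plaintext, in constant
   * time with respect to where the padding begins. */
  EVP_tls_cbc_copy_mac(record_mac, md_size, record, (unsigned)data_plus_mac_len,
                       (unsigned)record_len);

  /* Recompute the MAC over header || data, again independent of the
   * padding length. */
  if (!EVP_tls_cbc_digest_record(md, computed_mac, &computed_mac_len, header,
                                 record, data_plus_mac_len, record_len,
                                 mac_secret, mac_secret_len)) {
    return 0;
  }

  assert(computed_mac_len == md_size);
  return CRYPTO_memcmp(record_mac, computed_mac, md_size) == 0;
}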