/* reverse_and_mulX_ghash interprets the bytes |b->c| as a reversed element of
 * the GHASH field, multiplies that by 'x' and serialises the result back into
 * |b|, but with GHASH's backwards bit ordering. */
static void reverse_and_mulX_ghash(polyval_block *b) {
  uint64_t hi = b->u[0];
  uint64_t lo = b->u[1];
  // Capture the bit that will be shifted out below. This uses constant-time
  // primitives so the decision does not branch on secret data.
  const crypto_word_t carry = constant_time_eq_w(hi & 1, 1);
  // Shift the full 128-bit value right by one bit; in GHASH's reversed bit
  // ordering this is multiplication by 'x'.
  hi >>= 1;
  hi |= lo << 63;
  lo >>= 1;
  // If a bit was shifted out, reduce by XORing in the GHASH reduction
  // constant (0xe1 in the top byte), selected in constant time.
  lo ^= ((uint64_t) constant_time_select_w(carry, 0xe1, 0)) << 56;
  // Write the result back with GHASH's backwards byte ordering.
  b->u[0] = CRYPTO_bswap8(lo);
  b->u[1] = CRYPTO_bswap8(hi);
}
static int aead_aes_gcm_tls12_seal_scatter( const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len) { struct aead_aes_gcm_tls12_ctx *gcm_ctx = ctx->aead_state; if (nonce_len != 12) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } // The given nonces must be strictly monotonically increasing. uint64_t given_counter; OPENSSL_memcpy(&given_counter, nonce + nonce_len - sizeof(given_counter), sizeof(given_counter)); given_counter = CRYPTO_bswap8(given_counter); if (given_counter == UINT64_MAX || given_counter < gcm_ctx->min_next_nonce) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE); return 0; } gcm_ctx->min_next_nonce = given_counter + 1; return aead_aes_gcm_seal_scatter(ctx, out, out_tag, out_tag_len, max_out_tag_len, nonce, nonce_len, in, in_len, extra_in, extra_in_len, ad, ad_len); }
// ASN1_INTEGER_set_uint64 sets |out| to represent |v| and returns one on
// success or zero on allocation failure. Any previous contents of |out| are
// freed and replaced.
int ASN1_INTEGER_set_uint64(ASN1_INTEGER *out, uint64_t v) {
  uint8_t *const newdata = OPENSSL_malloc(sizeof(uint64_t));
  if (newdata == NULL) {
    OPENSSL_PUT_ERROR(ASN1, ERR_R_MALLOC_FAILURE);
    return 0;
  }
  OPENSSL_free(out->data);
  out->data = newdata;
  out->type = V_ASN1_INTEGER;

  // Serialise |v| big-endian with explicit shifts. The previous
  // CRYPTO_bswap8 + memcpy approach only produced big-endian output on
  // little-endian hosts; shifting is correct regardless of host byte order.
  for (size_t i = 0; i < sizeof(uint64_t); i++) {
    out->data[i] = (uint8_t)(v >> (56 - 8 * i));
  }

  // Strip leading zero bytes, but always keep at least one byte so that zero
  // is encoded as a single 0x00 octet.
  size_t leading_zeros = 0;
  while (leading_zeros < sizeof(uint64_t) - 1 &&
         out->data[leading_zeros] == 0) {
    leading_zeros++;
  }
  out->length = sizeof(uint64_t) - leading_zeros;
  OPENSSL_memmove(out->data, out->data + leading_zeros, out->length);
  return 1;
}
static int aead_aes_gcm_tls13_seal_scatter( const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len) { struct aead_aes_gcm_tls13_ctx *gcm_ctx = (struct aead_aes_gcm_tls13_ctx *) &ctx->state; if (nonce_len != 12) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } // The given nonces must be strictly monotonically increasing. See // https://tools.ietf.org/html/rfc8446#section-5.3 for details of the TLS 1.3 // nonce construction. uint64_t given_counter; OPENSSL_memcpy(&given_counter, nonce + nonce_len - sizeof(given_counter), sizeof(given_counter)); given_counter = CRYPTO_bswap8(given_counter); if (gcm_ctx->first) { // In the first call the sequence number will be zero and therefore the // given nonce will be 0 ^ mask = mask. gcm_ctx->mask = given_counter; gcm_ctx->first = 0; } given_counter ^= gcm_ctx->mask; if (given_counter == UINT64_MAX || given_counter < gcm_ctx->min_next_nonce) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE); return 0; } gcm_ctx->min_next_nonce = given_counter + 1; return aead_aes_gcm_seal_scatter(ctx, out, out_tag, out_tag_len, max_out_tag_len, nonce, nonce_len, in, in_len, extra_in, extra_in_len, ad, ad_len); }
/* byte_reverse reverses the order of the bytes in |b->c|. */
static void byte_reverse(polyval_block *b) {
  // Exchanging the two 64-bit halves and byte-swapping each one reverses all
  // sixteen bytes of the block.
  const uint64_t lo = b->u[0];
  const uint64_t hi = b->u[1];
  b->u[0] = CRYPTO_bswap8(hi);
  b->u[1] = CRYPTO_bswap8(lo);
}