int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len,
                 const EVP_MD *md, ENGINE *impl) {
  if (md == NULL) {
    md = ctx->md;
  }

  // If either |key| is non-NULL or |md| has changed, initialize with a new key
  // rather than rewinding the previous one.
  //
  // TODO(davidben,eroman): Passing the previous |md| with a NULL |key| is
  // ambiguous between using the empty key and reusing the previous key. There
  // exist callers which intend the latter, but the former is an awkward edge
  // case. Fix the API to avoid this.
  if (md != ctx->md || key != NULL) {
    uint8_t pad[EVP_MAX_MD_BLOCK_SIZE];
    uint8_t key_block[EVP_MAX_MD_BLOCK_SIZE];
    unsigned key_block_len;

    size_t block_size = EVP_MD_block_size(md);
    assert(block_size <= sizeof(key_block));
    if (block_size < key_len) {
      // Long keys are hashed.
      if (!EVP_DigestInit_ex(&ctx->md_ctx, md, impl) ||
          !EVP_DigestUpdate(&ctx->md_ctx, key, key_len) ||
          !EVP_DigestFinal_ex(&ctx->md_ctx, key_block, &key_block_len)) {
        return 0;
      }
    } else {
      assert(key_len <= sizeof(key_block));
      OPENSSL_memcpy(key_block, key, key_len);
      key_block_len = (unsigned)key_len;
    }
    // Keys are then padded with zeros.
    if (key_block_len != EVP_MAX_MD_BLOCK_SIZE) {
      OPENSSL_memset(&key_block[key_block_len], 0,
                     sizeof(key_block) - key_block_len);
    }

    for (size_t i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
      pad[i] = 0x36 ^ key_block[i];
    }
    if (!EVP_DigestInit_ex(&ctx->i_ctx, md, impl) ||
        !EVP_DigestUpdate(&ctx->i_ctx, pad, EVP_MD_block_size(md))) {
      return 0;
    }

    for (size_t i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
      pad[i] = 0x5c ^ key_block[i];
    }
    if (!EVP_DigestInit_ex(&ctx->o_ctx, md, impl) ||
        !EVP_DigestUpdate(&ctx->o_ctx, pad, EVP_MD_block_size(md))) {
      return 0;
    }

    ctx->md = md;
  }

  if (!EVP_MD_CTX_copy_ex(&ctx->md_ctx, &ctx->i_ctx)) {
    return 0;
  }

  return 1;
}
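// Illustrative usage sketch (not part of the library): the streaming HMAC
// calling pattern built on HMAC_Init_ex above, using <openssl/hmac.h>. The
// helper name and buffers are hypothetical. Passing a NULL |key| on a later
// HMAC_Init_ex call would reuse the key material derived here.
static int example_hmac_sha256(const uint8_t *key, size_t key_len,
                               const uint8_t *msg, size_t msg_len,
                               uint8_t out[EVP_MAX_MD_SIZE],
                               unsigned *out_len) {
  HMAC_CTX hmac;
  HMAC_CTX_init(&hmac);
  int ok = HMAC_Init_ex(&hmac, key, key_len, EVP_sha256(), NULL) &&
           HMAC_Update(&hmac, msg, msg_len) &&
           HMAC_Final(&hmac, out, out_len);
  HMAC_CTX_cleanup(&hmac);
  return ok;
}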
/* ssl_lookup_session looks up |session_id| in the session cache and sets
 * |*out_session| to an |SSL_SESSION| object if found. The caller takes
 * ownership of the result. */
static enum ssl_session_result_t ssl_lookup_session(
    SSL *ssl, SSL_SESSION **out_session, const uint8_t *session_id,
    size_t session_id_len) {
  *out_session = NULL;

  if (session_id_len == 0 || session_id_len > SSL_MAX_SSL_SESSION_ID_LENGTH) {
    return ssl_session_success;
  }

  SSL_SESSION *session = NULL;
  /* Try the internal cache, if it exists. */
  if (!(ssl->session_ctx->session_cache_mode &
        SSL_SESS_CACHE_NO_INTERNAL_LOOKUP)) {
    SSL_SESSION data;
    data.ssl_version = ssl->version;
    data.session_id_length = session_id_len;
    OPENSSL_memcpy(data.session_id, session_id, session_id_len);

    CRYPTO_MUTEX_lock_read(&ssl->session_ctx->lock);
    session = lh_SSL_SESSION_retrieve(ssl->session_ctx->sessions, &data);
    if (session != NULL) {
      SSL_SESSION_up_ref(session);
    }
    /* TODO(davidben): This should probably move it to the front of the
     * list. */
    CRYPTO_MUTEX_unlock_read(&ssl->session_ctx->lock);
  }

  /* Fall back to the external cache, if it exists. */
  if (session == NULL && ssl->session_ctx->get_session_cb != NULL) {
    int copy = 1;
    session = ssl->session_ctx->get_session_cb(ssl, (uint8_t *)session_id,
                                               session_id_len, &copy);
    if (session == NULL) {
      return ssl_session_success;
    }

    if (session == SSL_magic_pending_session_ptr()) {
      return ssl_session_retry;
    }

    /* Increment reference count now if the session callback asks us to do so
     * (note that if the session structures returned by the callback are shared
     * between threads, it must handle the reference count itself [i.e. copy ==
     * 0], or things won't be thread-safe). */
    if (copy) {
      SSL_SESSION_up_ref(session);
    }

    /* Add the externally cached session to the internal cache if necessary. */
    if (!(ssl->session_ctx->session_cache_mode &
          SSL_SESS_CACHE_NO_INTERNAL_STORE)) {
      SSL_CTX_add_session(ssl->session_ctx, session);
    }
  }

  if (session != NULL && !ssl_session_is_time_valid(ssl, session)) {
    /* The session was from the cache, so remove it. */
    SSL_CTX_remove_session(ssl->session_ctx, session);
    SSL_SESSION_free(session);
    session = NULL;
  }

  *out_session = session;
  return ssl_session_success;
}
static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) {
  EVP_AES_GCM_CTX *gctx = c->cipher_data;
  switch (type) {
    case EVP_CTRL_INIT:
      gctx->key_set = 0;
      gctx->iv_set = 0;
      gctx->ivlen = c->cipher->iv_len;
      gctx->iv = c->iv;
      gctx->taglen = -1;
      gctx->iv_gen = 0;
      return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
      if (arg <= 0) {
        return 0;
      }

      // Allocate memory for IV if needed
      if (arg > EVP_MAX_IV_LENGTH && arg > gctx->ivlen) {
        if (gctx->iv != c->iv) {
          OPENSSL_free(gctx->iv);
        }
        gctx->iv = OPENSSL_malloc(arg);
        if (!gctx->iv) {
          return 0;
        }
      }
      gctx->ivlen = arg;
      return 1;

    case EVP_CTRL_AEAD_SET_TAG:
      if (arg <= 0 || arg > 16 || c->encrypt) {
        return 0;
      }
      OPENSSL_memcpy(c->buf, ptr, arg);
      gctx->taglen = arg;
      return 1;

    case EVP_CTRL_AEAD_GET_TAG:
      if (arg <= 0 || arg > 16 || !c->encrypt || gctx->taglen < 0) {
        return 0;
      }
      OPENSSL_memcpy(ptr, c->buf, arg);
      return 1;

    case EVP_CTRL_AEAD_SET_IV_FIXED:
      // Special case: -1 length restores whole IV
      if (arg == -1) {
        OPENSSL_memcpy(gctx->iv, ptr, gctx->ivlen);
        gctx->iv_gen = 1;
        return 1;
      }
      // Fixed field must be at least 4 bytes and invocation field
      // at least 8.
      if (arg < 4 || (gctx->ivlen - arg) < 8) {
        return 0;
      }
      if (arg) {
        OPENSSL_memcpy(gctx->iv, ptr, arg);
      }
      if (c->encrypt && !RAND_bytes(gctx->iv + arg, gctx->ivlen - arg)) {
        return 0;
      }
      gctx->iv_gen = 1;
      return 1;

    case EVP_CTRL_GCM_IV_GEN:
      if (gctx->iv_gen == 0 || gctx->key_set == 0) {
        return 0;
      }
      CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, gctx->iv, gctx->ivlen);
      if (arg <= 0 || arg > gctx->ivlen) {
        arg = gctx->ivlen;
      }
      OPENSSL_memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
      // Invocation field will be at least 8 bytes in size and
      // so no need to check wrap around or increment more than
      // last 8 bytes.
      ctr64_inc(gctx->iv + gctx->ivlen - 8);
      gctx->iv_set = 1;
      return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
      if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt) {
        return 0;
      }
      OPENSSL_memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
      CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, gctx->iv, gctx->ivlen);
      gctx->iv_set = 1;
      return 1;

    case EVP_CTRL_COPY: {
      EVP_CIPHER_CTX *out = ptr;
      EVP_AES_GCM_CTX *gctx_out = out->cipher_data;
      if (gctx->iv == c->iv) {
        gctx_out->iv = out->iv;
      } else {
        gctx_out->iv = OPENSSL_malloc(gctx->ivlen);
        if (!gctx_out->iv) {
          return 0;
        }
        OPENSSL_memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
      }
      return 1;
    }

    default:
      return -1;
  }
}
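// Illustrative usage sketch (not part of the library): how a caller drives
// the ctrl operations above through the public EVP interface. The helper
// name, 16-byte key, and 12-byte nonce are hypothetical.
static int example_gcm_encrypt(const uint8_t key[16], const uint8_t iv[12],
                               const uint8_t *in, int in_len, uint8_t *out,
                               uint8_t tag[16]) {
  EVP_CIPHER_CTX ctx;
  EVP_CIPHER_CTX_init(&ctx);
  int len;
  int ok =
      EVP_EncryptInit_ex(&ctx, EVP_aes_128_gcm(), NULL, NULL, NULL) &&
      // EVP_CTRL_AEAD_SET_IVLEN lands in the aes_gcm_ctrl switch above.
      EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL) &&
      EVP_EncryptInit_ex(&ctx, NULL, NULL, key, iv) &&
      EVP_EncryptUpdate(&ctx, out, &len, in, in_len) &&
      EVP_EncryptFinal_ex(&ctx, out + len, &len) &&
      // EVP_CTRL_AEAD_GET_TAG copies the tag buffered in |ctx|.
      EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
  EVP_CIPHER_CTX_cleanup(&ctx);
  return ok;
}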
SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method,
                               CRYPTO_BUFFER_POOL *pool) {
  SSL_SESSION *ret = ssl_session_new(x509_method);
  if (ret == NULL) {
    goto err;
  }

  CBS session;
  uint64_t version, ssl_version;
  if (!CBS_get_asn1(cbs, &session, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1_uint64(&session, &version) ||
      version != kVersion ||
      !CBS_get_asn1_uint64(&session, &ssl_version)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }
  ret->ssl_version = ssl_version;

  CBS cipher;
  uint16_t cipher_value;
  if (!CBS_get_asn1(&session, &cipher, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_u16(&cipher, &cipher_value) ||
      CBS_len(&cipher) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }
  ret->cipher = SSL_get_cipher_by_value(cipher_value);
  if (ret->cipher == NULL) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_CIPHER);
    goto err;
  }

  CBS session_id, master_key;
  if (!CBS_get_asn1(&session, &session_id, CBS_ASN1_OCTETSTRING) ||
      CBS_len(&session_id) > SSL3_MAX_SSL_SESSION_ID_LENGTH ||
      !CBS_get_asn1(&session, &master_key, CBS_ASN1_OCTETSTRING) ||
      CBS_len(&master_key) > SSL_MAX_MASTER_KEY_LENGTH) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }
  OPENSSL_memcpy(ret->session_id, CBS_data(&session_id), CBS_len(&session_id));
  ret->session_id_length = CBS_len(&session_id);
  OPENSSL_memcpy(ret->master_key, CBS_data(&master_key), CBS_len(&master_key));
  ret->master_key_length = CBS_len(&master_key);

  CBS child;
  uint64_t timeout;
  if (!CBS_get_asn1(&session, &child, kTimeTag) ||
      !CBS_get_asn1_uint64(&child, &ret->time) ||
      !CBS_get_asn1(&session, &child, kTimeoutTag) ||
      !CBS_get_asn1_uint64(&child, &timeout) ||
      timeout > UINT32_MAX) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }
  ret->timeout = (uint32_t)timeout;

  CBS peer;
  int has_peer;
  if (!CBS_get_optional_asn1(&session, &peer, &has_peer, kPeerTag) ||
      (has_peer && CBS_len(&peer) == 0)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }
  /* |peer| is processed with the certificate chain. */

  if (!SSL_SESSION_parse_bounded_octet_string(
          &session, ret->sid_ctx, &ret->sid_ctx_length, sizeof(ret->sid_ctx),
          kSessionIDContextTag) ||
      !SSL_SESSION_parse_long(&session, &ret->verify_result, kVerifyResultTag,
                              X509_V_OK) ||
      !SSL_SESSION_parse_string(&session, &ret->tlsext_hostname,
                                kHostNameTag) ||
      !SSL_SESSION_parse_string(&session, &ret->psk_identity,
                                kPSKIdentityTag) ||
      !SSL_SESSION_parse_u32(&session, &ret->tlsext_tick_lifetime_hint,
                             kTicketLifetimeHintTag, 0) ||
      !SSL_SESSION_parse_octet_string(&session, &ret->tlsext_tick,
                                      &ret->tlsext_ticklen, kTicketTag)) {
    goto err;
  }

  if (CBS_peek_asn1_tag(&session, kPeerSHA256Tag)) {
    CBS peer_sha256;
    if (!CBS_get_asn1(&session, &child, kPeerSHA256Tag) ||
        !CBS_get_asn1(&child, &peer_sha256, CBS_ASN1_OCTETSTRING) ||
        CBS_len(&peer_sha256) != sizeof(ret->peer_sha256) ||
        CBS_len(&child) != 0) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
      goto err;
    }
    OPENSSL_memcpy(ret->peer_sha256, CBS_data(&peer_sha256),
                   sizeof(ret->peer_sha256));
    ret->peer_sha256_valid = 1;
  } else {
    ret->peer_sha256_valid = 0;
  }

  if (!SSL_SESSION_parse_bounded_octet_string(
          &session, ret->original_handshake_hash,
          &ret->original_handshake_hash_len,
          sizeof(ret->original_handshake_hash), kOriginalHandshakeHashTag) ||
      !SSL_SESSION_parse_octet_string(
          &session, &ret->tlsext_signed_cert_timestamp_list,
          &ret->tlsext_signed_cert_timestamp_list_length,
          kSignedCertTimestampListTag) ||
      !SSL_SESSION_parse_octet_string(
          &session, &ret->ocsp_response, &ret->ocsp_response_length,
          kOCSPResponseTag)) {
    goto err;
  }

  int extended_master_secret;
  if (!CBS_get_optional_asn1_bool(&session, &extended_master_secret,
                                  kExtendedMasterSecretTag,
                                  0 /* default to false */)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }
  ret->extended_master_secret = !!extended_master_secret;

  if (!SSL_SESSION_parse_u16(&session, &ret->group_id, kGroupIDTag, 0)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }

  CBS cert_chain;
  CBS_init(&cert_chain, NULL, 0);
  int has_cert_chain;
  if (!CBS_get_optional_asn1(&session, &cert_chain, &has_cert_chain,
                             kCertChainTag) ||
      (has_cert_chain && CBS_len(&cert_chain) == 0)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }
  if (has_cert_chain && !has_peer) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }
  if (has_peer || has_cert_chain) {
    ret->certs = sk_CRYPTO_BUFFER_new_null();
    if (ret->certs == NULL) {
      OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE);
      goto err;
    }
    if (has_peer) {
      /* TODO(agl): this should use the |SSL_CTX|'s pool. */
      CRYPTO_BUFFER *buffer = CRYPTO_BUFFER_new_from_CBS(&peer, pool);
      if (buffer == NULL ||
          !sk_CRYPTO_BUFFER_push(ret->certs, buffer)) {
        CRYPTO_BUFFER_free(buffer);
        OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE);
        goto err;
      }
    }

    while (CBS_len(&cert_chain) > 0) {
      CBS cert;
      if (!CBS_get_any_asn1_element(&cert_chain, &cert, NULL, NULL) ||
          CBS_len(&cert) == 0) {
        OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
        goto err;
      }

      /* TODO(agl): this should use the |SSL_CTX|'s pool. */
      CRYPTO_BUFFER *buffer = CRYPTO_BUFFER_new_from_CBS(&cert, pool);
      if (buffer == NULL ||
          !sk_CRYPTO_BUFFER_push(ret->certs, buffer)) {
        CRYPTO_BUFFER_free(buffer);
        OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE);
        goto err;
      }
    }
  }

  if (!x509_method->session_cache_objects(ret)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }

  CBS age_add;
  int age_add_present;
  if (!CBS_get_optional_asn1_octet_string(&session, &age_add, &age_add_present,
                                          kTicketAgeAddTag) ||
      (age_add_present &&
       !CBS_get_u32(&age_add, &ret->ticket_age_add)) ||
      CBS_len(&age_add) != 0) {
    goto err;
  }
  ret->ticket_age_add_valid = age_add_present;

  int is_server;
  if (!CBS_get_optional_asn1_bool(&session, &is_server, kIsServerTag,
                                  1 /* default to true */)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }
  /* TODO: in time we can include |is_server| for servers too, then we can
   * enforce that client and server sessions are never mixed up. */
  ret->is_server = is_server;

  if (!SSL_SESSION_parse_u16(&session, &ret->peer_signature_algorithm,
                             kPeerSignatureAlgorithmTag, 0) ||
      !SSL_SESSION_parse_u32(&session, &ret->ticket_max_early_data,
                             kTicketMaxEarlyDataTag, 0) ||
      !SSL_SESSION_parse_u32(&session, &ret->auth_timeout, kAuthTimeoutTag,
                             ret->timeout) ||
      !SSL_SESSION_parse_octet_string(&session, &ret->early_alpn,
                                      &ret->early_alpn_len, kEarlyALPNTag) ||
      CBS_len(&session) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    goto err;
  }

  return ret;

err:
  SSL_SESSION_free(ret);
  return NULL;
}
void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
                          size_t in_len, size_t orig_len) {
  uint8_t rotated_mac1[EVP_MAX_MD_SIZE], rotated_mac2[EVP_MAX_MD_SIZE];
  uint8_t *rotated_mac = rotated_mac1;
  uint8_t *rotated_mac_tmp = rotated_mac2;

  /* mac_end is the index of |in| just after the end of the MAC. */
  size_t mac_end = in_len;
  size_t mac_start = mac_end - md_size;

  assert(orig_len >= in_len);
  assert(in_len >= md_size);
  assert(md_size <= EVP_MAX_MD_SIZE);

  /* scan_start contains the number of bytes that we can ignore because
   * the MAC's position can only vary by 255 bytes. */
  size_t scan_start = 0;
  /* This information is public so it's safe to branch based on it. */
  if (orig_len > md_size + 255 + 1) {
    scan_start = orig_len - (md_size + 255 + 1);
  }

  size_t rotate_offset = 0;
  uint8_t mac_started = 0;
  OPENSSL_memset(rotated_mac, 0, md_size);
  for (size_t i = scan_start, j = 0; i < orig_len; i++, j++) {
    if (j >= md_size) {
      j -= md_size;
    }
    crypto_word_t is_mac_start = constant_time_eq_w(i, mac_start);
    mac_started |= is_mac_start;
    uint8_t mac_ended = constant_time_ge_8(i, mac_end);
    rotated_mac[j] |= in[i] & mac_started & ~mac_ended;
    /* Save the offset that |mac_start| is mapped to. */
    rotate_offset |= j & is_mac_start;
  }

  /* Now rotate the MAC. We rotate in log(md_size) steps, one for each bit
   * position. */
  for (size_t offset = 1; offset < md_size; offset <<= 1, rotate_offset >>= 1) {
    /* Rotate by |offset| iff the corresponding bit is set in
     * |rotate_offset|, placing the result in |rotated_mac_tmp|. */
    const uint8_t skip_rotate = (rotate_offset & 1) - 1;
    for (size_t i = 0, j = offset; i < md_size; i++, j++) {
      if (j >= md_size) {
        j -= md_size;
      }
      rotated_mac_tmp[i] =
          constant_time_select_8(skip_rotate, rotated_mac[i], rotated_mac[j]);
    }

    /* Swap pointers so |rotated_mac| contains the (possibly) rotated value.
     * Note the number of iterations and thus the identity of these pointers is
     * public information. */
    uint8_t *tmp = rotated_mac;
    rotated_mac = rotated_mac_tmp;
    rotated_mac_tmp = tmp;
  }

  OPENSSL_memcpy(out, rotated_mac, md_size);
}
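/* Illustrative sketch (not part of the library): the mask arithmetic that the
 * constant-time helpers used above rely on. A boolean is expanded to an
 * all-ones or all-zeros mask, and the mask then selects between two values
 * without a data-dependent branch. The helper names are hypothetical. */
static uint8_t example_mask_u8(int bit) {
  /* 0 -> 0x00, 1 -> 0xff, with no branch on |bit|. */
  return (uint8_t)(0u - (uint8_t)bit);
}

static uint8_t example_select_u8(uint8_t mask, uint8_t a, uint8_t b) {
  /* Returns |a| when |mask| is all ones, |b| when it is all zeros. */
  return (uint8_t)((mask & a) | (~mask & b));
}

static void example_select_demo(void) {
  assert(example_select_u8(example_mask_u8(1), 0xaa, 0x55) == 0xaa);
  assert(example_select_u8(example_mask_u8(0), 0xaa, 0x55) == 0x55);
}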
// rsa_blinding_get returns a BN_BLINDING to use with |rsa|. It does this by
// allocating one of the cached BN_BLINDING objects in |rsa->blindings|. If
// none are free, the cache will be extended by an extra element and the new
// BN_BLINDING is returned.
//
// On success, the index of the assigned BN_BLINDING is written to
// |*index_used| and must be passed to |rsa_blinding_release| when finished.
static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used,
                                     BN_CTX *ctx) {
  assert(ctx != NULL);
  assert(rsa->mont_n != NULL);

  BN_BLINDING *ret = NULL;
  BN_BLINDING **new_blindings;
  uint8_t *new_blindings_inuse;
  char overflow = 0;

  CRYPTO_MUTEX_lock_write(&rsa->lock);

  unsigned i;
  for (i = 0; i < rsa->num_blindings; i++) {
    if (rsa->blindings_inuse[i] == 0) {
      rsa->blindings_inuse[i] = 1;
      ret = rsa->blindings[i];
      *index_used = i;
      break;
    }
  }

  if (ret != NULL) {
    CRYPTO_MUTEX_unlock_write(&rsa->lock);
    return ret;
  }

  overflow = rsa->num_blindings >= MAX_BLINDINGS_PER_RSA;

  // We didn't find a free BN_BLINDING to use so increase the length of
  // the arrays by one and use the newly created element.

  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  ret = BN_BLINDING_new();
  if (ret == NULL) {
    return NULL;
  }

  if (overflow) {
    // We cannot add any more cached BN_BLINDINGs so we use |ret|
    // and mark it for destruction in |rsa_blinding_release|.
    *index_used = MAX_BLINDINGS_PER_RSA;
    return ret;
  }

  CRYPTO_MUTEX_lock_write(&rsa->lock);

  new_blindings =
      OPENSSL_malloc(sizeof(BN_BLINDING *) * (rsa->num_blindings + 1));
  if (new_blindings == NULL) {
    goto err1;
  }
  OPENSSL_memcpy(new_blindings, rsa->blindings,
                 sizeof(BN_BLINDING *) * rsa->num_blindings);
  new_blindings[rsa->num_blindings] = ret;

  new_blindings_inuse = OPENSSL_malloc(rsa->num_blindings + 1);
  if (new_blindings_inuse == NULL) {
    goto err2;
  }
  OPENSSL_memcpy(new_blindings_inuse, rsa->blindings_inuse,
                 rsa->num_blindings);
  new_blindings_inuse[rsa->num_blindings] = 1;
  *index_used = rsa->num_blindings;

  OPENSSL_free(rsa->blindings);
  rsa->blindings = new_blindings;
  OPENSSL_free(rsa->blindings_inuse);
  rsa->blindings_inuse = new_blindings_inuse;
  rsa->num_blindings++;

  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  return ret;

err2:
  OPENSSL_free(new_blindings);

err1:
  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  BN_BLINDING_free(ret);
  return NULL;
}
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len, const uint8_t *ad,
                         size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    // Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    // This requires that the caller provide space for the MAC, even though it
    // will always be removed on return.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    // EVP_CIPHER takes int as input.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  // Configure the explicit IV.
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  // Decrypt to get the plaintext + MAC + padding.
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);

  // Remove CBC padding. Code from here on is timing-sensitive with respect to
  // |padding_ok| and |data_plus_mac_len| for CBC ciphers.
  size_t data_plus_mac_len;
  crypto_word_t padding_ok;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    if (!EVP_tls_cbc_remove_padding(
            &padding_ok, &data_plus_mac_len, out, total,
            EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
            HMAC_size(&tls_ctx->hmac_ctx))) {
      // Publicly invalid. This can be rejected in non-constant time.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = CONSTTIME_TRUE_W;
    data_plus_mac_len = total;
    // |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
    // already been checked against the MAC size at the top of the function.
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  size_t data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  // At this point, if the padding is valid, the first |data_plus_mac_len|
  // bytes after |out| are the plaintext and MAC. Otherwise,
  // |data_plus_mac_len| is still large enough to extract a MAC, but it will
  // be irrelevant.

  // To allow for CBC mode which changes cipher length, |ad| doesn't include
  // the length for legacy ciphers.
  uint8_t ad_fixed[13];
  OPENSSL_memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  // Compute the MAC and extract the one in the record.
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));

    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    // We should support the constant-time path for all CBC-mode ciphers
    // implemented.
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    unsigned mac_len_u;
    if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, out, data_len) ||
        !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len_u)) {
      return 0;
    }
    mac_len = mac_len_u;
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));

    record_mac = &out[data_len];
  }

  // Perform the MAC check and the padding check in constant-time. It should
  // be safe to simply perform the padding check first, but it would not be
  // under a different choice of MAC location on padding failure. See
  // EVP_tls_cbc_remove_padding.
  crypto_word_t good =
      constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
  good &= padding_ok;
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  // End of timing-sensitive code.

  *out_len = data_len;
  return 1;
}
ASN1_INTEGER *d2i_ASN1_UINTEGER(ASN1_INTEGER **a, const unsigned char **pp,
                                long length)
{
    ASN1_INTEGER *ret = NULL;
    const unsigned char *p;
    unsigned char *s;
    long len;
    int inf, tag, xclass;
    int i;

    if ((a == NULL) || ((*a) == NULL)) {
        if ((ret = M_ASN1_INTEGER_new()) == NULL)
            return (NULL);
        ret->type = V_ASN1_INTEGER;
    } else
        ret = (*a);

    p = *pp;
    inf = ASN1_get_object(&p, &len, &tag, &xclass, length);
    if (inf & 0x80) {
        i = ASN1_R_BAD_OBJECT_HEADER;
        goto err;
    }

    if (tag != V_ASN1_INTEGER) {
        i = ASN1_R_EXPECTING_AN_INTEGER;
        goto err;
    }

    /*
     * We must OPENSSL_malloc stuff, even for 0 bytes otherwise it signifies
     * a missing NULL parameter.
     */
    s = (unsigned char *)OPENSSL_malloc((int)len + 1);
    if (s == NULL) {
        i = ERR_R_MALLOC_FAILURE;
        goto err;
    }
    ret->type = V_ASN1_INTEGER;
    if (len) {
        if ((*p == 0) && (len != 1)) {
            p++;
            len--;
        }
        OPENSSL_memcpy(s, p, (int)len);
        p += len;
    }

    if (ret->data != NULL)
        OPENSSL_free(ret->data);
    ret->data = s;
    ret->length = (int)len;
    if (a != NULL)
        (*a) = ret;
    *pp = p;
    return (ret);
 err:
    OPENSSL_PUT_ERROR(ASN1, i);
    if ((ret != NULL) && ((a == NULL) || (*a != ret)))
        M_ASN1_INTEGER_free(ret);
    return (NULL);
}
static int bio_write(BIO *bio, const char *buf, int num_) {
  size_t num = num_;
  size_t rest;
  struct bio_bio_st *b;

  BIO_clear_retry_flags(bio);

  if (!bio->init || buf == NULL || num == 0) {
    return 0;
  }

  b = bio->ptr;
  assert(b != NULL);
  assert(b->peer != NULL);
  assert(b->buf != NULL);

  b->request = 0;
  if (b->closed) {
    // we already closed
    OPENSSL_PUT_ERROR(BIO, BIO_R_BROKEN_PIPE);
    return -1;
  }

  assert(b->len <= b->size);

  if (b->len == b->size) {
    BIO_set_retry_write(bio);  // buffer is full
    return -1;
  }

  // we can write
  if (num > b->size - b->len) {
    num = b->size - b->len;
  }

  // now write "num" bytes
  rest = num;

  assert(rest > 0);
  // one or two iterations
  do {
    size_t write_offset;
    size_t chunk;

    assert(b->len + rest <= b->size);

    write_offset = b->offset + b->len;
    if (write_offset >= b->size) {
      write_offset -= b->size;
    }
    // b->buf[write_offset] is the first byte we can write to.

    if (write_offset + rest <= b->size) {
      chunk = rest;
    } else {
      // wrap around ring buffer
      chunk = b->size - write_offset;
    }

    OPENSSL_memcpy(b->buf + write_offset, buf, chunk);

    b->len += chunk;
    assert(b->len <= b->size);

    rest -= chunk;
    buf += chunk;
  } while (rest);

  return num;
}
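// Illustrative usage sketch (not part of the library): bio_write and bio_read
// above back the two halves of a BIO pair, so data written into one half is
// read back out of the other. The helper name and buffer sizes are
// hypothetical.
static int example_bio_pair(void) {
  BIO *left = NULL, *right = NULL;
  uint8_t buf[5];
  if (!BIO_new_bio_pair(&left, 17 /* writebuf1 */, &right,
                        19 /* writebuf2 */)) {
    return 0;
  }
  int ok = BIO_write(left, "hello", 5) == 5 &&
           BIO_read(right, buf, sizeof(buf)) == 5;
  BIO_free(left);
  BIO_free(right);
  return ok;
}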
static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                 uint8_t *out_tag, size_t *out_tag_len,
                                 const size_t max_out_tag_len,
                                 const uint8_t *nonce, const size_t nonce_len,
                                 const uint8_t *in, const size_t in_len,
                                 const uint8_t *extra_in,
                                 const size_t extra_in_len, const uint8_t *ad,
                                 const size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (!tls_ctx->cipher_ctx.encrypt) {
    // Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len > INT_MAX) {
    // EVP_CIPHER takes int as input.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_tag_len < aead_tls_tag_len(ctx, in_len, extra_in_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  // To allow for CBC mode which changes cipher length, |ad| doesn't include
  // the length for legacy ciphers.
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  // Compute the MAC. This must be first in case the operation is being done
  // in-place.
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, in, in_len) ||
      !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len)) {
    return 0;
  }

  // Configure the explicit IV.
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  // Encrypt the input.
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);

  // Feed the MAC into the cipher in two steps. First complete the final
  // partial block from encrypting the input and split the result between
  // |out| and |out_tag|. Then feed the rest.
  const size_t early_mac_len =
      (block_size - (in_len % block_size)) % block_size;
  if (early_mac_len != 0) {
    assert(len + block_size - early_mac_len == in_len);
    uint8_t buf[EVP_MAX_BLOCK_LENGTH];
    int buf_len;
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, buf, &buf_len, mac,
                           (int)early_mac_len)) {
      return 0;
    }
    assert(buf_len == (int)block_size);
    OPENSSL_memcpy(out + len, buf, block_size - early_mac_len);
    OPENSSL_memcpy(out_tag, buf + block_size - early_mac_len, early_mac_len);
  }
  size_t tag_len = early_mac_len;

  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len,
                         mac + tag_len, mac_len - tag_len)) {
    return 0;
  }
  tag_len += len;

  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    // Compute padding and feed that into the cipher.
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    OPENSSL_memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len,
                           padding, (int)padding_len)) {
      return 0;
    }
    tag_len += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len)) {
    return 0;
  }
  assert(len == 0);  // Padding is explicit.
  assert(tag_len == aead_tls_tag_len(ctx, in_len, extra_in_len));

  *out_tag_len = tag_len;
  return 1;
}
static int bio_read(BIO *bio, char *buf, int size_) {
  size_t size = size_;
  size_t rest;
  struct bio_bio_st *b, *peer_b;

  BIO_clear_retry_flags(bio);

  if (!bio->init) {
    return 0;
  }

  b = bio->ptr;
  assert(b != NULL);
  assert(b->peer != NULL);
  peer_b = b->peer->ptr;
  assert(peer_b != NULL);
  assert(peer_b->buf != NULL);

  peer_b->request = 0;  // will be set in "retry_read" situation

  if (buf == NULL || size == 0) {
    return 0;
  }

  if (peer_b->len == 0) {
    if (peer_b->closed) {
      return 0;  // writer has closed, and no data is left
    } else {
      BIO_set_retry_read(bio);  // buffer is empty
      if (size <= peer_b->size) {
        peer_b->request = size;
      } else {
        // don't ask for more than the peer can
        // deliver in one write
        peer_b->request = peer_b->size;
      }
      return -1;
    }
  }

  // we can read
  if (peer_b->len < size) {
    size = peer_b->len;
  }

  // now read "size" bytes
  rest = size;

  assert(rest > 0);
  // one or two iterations
  do {
    size_t chunk;

    assert(rest <= peer_b->len);

    if (peer_b->offset + rest <= peer_b->size) {
      chunk = rest;
    } else {
      // wrap around ring buffer
      chunk = peer_b->size - peer_b->offset;
    }
    assert(peer_b->offset + chunk <= peer_b->size);

    OPENSSL_memcpy(buf, peer_b->buf + peer_b->offset, chunk);

    peer_b->len -= chunk;
    if (peer_b->len) {
      peer_b->offset += chunk;
      assert(peer_b->offset <= peer_b->size);
      if (peer_b->offset == peer_b->size) {
        peer_b->offset = 0;
      }
      buf += chunk;
    } else {
      // buffer now empty, no need to advance "buf"
      assert(chunk == rest);
      peer_b->offset = 0;
    }
    rest -= chunk;
  } while (rest);

  return size;
}
int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
  int max, min, dif;
  register BN_ULONG t1, t2, *ap, *bp, *rp;
  int i, carry;

  max = bn_minimal_width(a);
  min = bn_minimal_width(b);
  dif = max - min;

  if (dif < 0) {  // hmm... should not be happening
    OPENSSL_PUT_ERROR(BN, BN_R_ARG2_LT_ARG3);
    return 0;
  }

  if (!bn_wexpand(r, max)) {
    return 0;
  }

  ap = a->d;
  bp = b->d;
  rp = r->d;

  carry = 0;
  for (i = min; i != 0; i--) {
    t1 = *(ap++);
    t2 = *(bp++);
    if (carry) {
      carry = (t1 <= t2);
      t1 -= t2 + 1;
    } else {
      carry = (t1 < t2);
      t1 -= t2;
    }
    *(rp++) = t1;
  }

  if (carry) {  // subtracted
    if (!dif) {
      // error: a < b
      return 0;
    }
    while (dif) {
      dif--;
      t1 = *(ap++);
      t2 = t1 - 1;
      *(rp++) = t2;
      if (t1) {
        break;
      }
    }
  }

  if (dif > 0 && rp != ap) {
    OPENSSL_memcpy(rp, ap, sizeof(*rp) * dif);
  }

  r->width = max;
  r->neg = 0;
  bn_set_minimal_width(r);

  return 1;
}
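// Illustrative usage sketch (not part of the library): BN_usub computes the
// difference of magnitudes |a| - |b| and fails when |b| is larger. The helper
// name and values are hypothetical.
static int example_bn_usub(void) {
  BIGNUM *a = BN_new(), *b = BN_new(), *r = BN_new();
  int ok = a != NULL && b != NULL && r != NULL &&
           BN_set_word(a, 1000) &&
           BN_set_word(b, 42) &&
           BN_usub(r, a, b) &&  // r is now 958
           BN_is_word(r, 958);
  BN_free(a);
  BN_free(b);
  BN_free(r);
  return ok;
}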
void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const void *key, uint8_t ivec[16],
                           block128_f block) {
  size_t n;
  union {
    size_t t[16 / sizeof(size_t)];
    uint8_t c[16];
  } tmp;

  assert(key != NULL && ivec != NULL);
  assert(len == 0 || (in != NULL && out != NULL));

  const uintptr_t inptr = (uintptr_t)in;
  const uintptr_t outptr = (uintptr_t)out;
  // If |in| and |out| alias, |in| must be ahead.
  assert(inptr >= outptr || inptr + len <= outptr);

  if ((inptr >= 32 && outptr <= inptr - 32) || inptr < outptr) {
    // If |out| is at least two blocks behind |in| or completely disjoint,
    // there is no need to decrypt to a temporary block.
    const uint8_t *iv = ivec;

    if (STRICT_ALIGNMENT &&
        ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
      while (len >= 16) {
        (*block)(in, out, key);
        for (n = 0; n < 16; ++n) {
          out[n] ^= iv[n];
        }
        iv = in;
        len -= 16;
        in += 16;
        out += 16;
      }
    } else if (16 % sizeof(size_t) == 0) {  // always true
      while (len >= 16) {
        size_t *out_t = (size_t *)out, *iv_t = (size_t *)iv;

        (*block)(in, out, key);
        for (n = 0; n < 16 / sizeof(size_t); n++) {
          out_t[n] ^= iv_t[n];
        }
        iv = in;
        len -= 16;
        in += 16;
        out += 16;
      }
    }
    OPENSSL_memcpy(ivec, iv, 16);
  } else {
    // |out| is less than two blocks behind |in|. Decrypting an input block
    // directly to |out| would overwrite a ciphertext block before it is used
    // as the next block's IV. Decrypt to a temporary block instead.
    if (STRICT_ALIGNMENT &&
        ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
      uint8_t c;
      while (len >= 16) {
        (*block)(in, tmp.c, key);
        for (n = 0; n < 16; ++n) {
          c = in[n];
          out[n] = tmp.c[n] ^ ivec[n];
          ivec[n] = c;
        }
        len -= 16;
        in += 16;
        out += 16;
      }
    } else if (16 % sizeof(size_t) == 0) {  // always true
      while (len >= 16) {
        size_t c, *out_t = (size_t *)out, *ivec_t = (size_t *)ivec;
        const size_t *in_t = (const size_t *)in;

        (*block)(in, tmp.c, key);
        for (n = 0; n < 16 / sizeof(size_t); n++) {
          c = in_t[n];
          out_t[n] = tmp.t[n] ^ ivec_t[n];
          ivec_t[n] = c;
        }
        len -= 16;
        in += 16;
        out += 16;
      }
    }
  }

  while (len) {
    uint8_t c;
    (*block)(in, tmp.c, key);
    for (n = 0; n < 16 && n < len; ++n) {
      c = in[n];
      out[n] = tmp.c[n] ^ ivec[n];
      ivec[n] = c;
    }
    if (len <= 16) {
      for (; n < 16; ++n) {
        ivec[n] = in[n];
      }
      break;
    }
    len -= 16;
    in += 16;
    out += 16;
  }
}
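// Illustrative usage sketch (not part of the library): wiring an AES key
// schedule into CRYPTO_cbc128_decrypt via the block128_f adapter. AES_decrypt
// processes one 16-byte block per call, which matches the block128_f shape;
// the cast only adjusts the const void* key parameter. The helper name is
// hypothetical and |len| is assumed to be a multiple of 16.
static void example_aes_cbc_decrypt(const AES_KEY *key, uint8_t iv[16],
                                    const uint8_t *in, uint8_t *out,
                                    size_t len) {
  CRYPTO_cbc128_decrypt(in, out, len, key, iv, (block128_f)AES_decrypt);
}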
int i2c_ASN1_INTEGER(ASN1_INTEGER *a, unsigned char **pp)
{
    int pad = 0, ret, i, neg;
    unsigned char *p, *n, pb = 0;

    if (a == NULL)
        return (0);
    neg = a->type & V_ASN1_NEG;
    if (a->length == 0)
        ret = 1;
    else {
        ret = a->length;
        i = a->data[0];
        if (ret == 1 && i == 0)
            neg = 0;
        if (!neg && (i > 127)) {
            pad = 1;
            pb = 0;
        } else if (neg) {
            if (i > 128) {
                pad = 1;
                pb = 0xFF;
            } else if (i == 128) {
                /*
                 * Special case: if any other bytes non zero we pad:
                 * otherwise we don't.
                 */
                for (i = 1; i < a->length; i++)
                    if (a->data[i]) {
                        pad = 1;
                        pb = 0xFF;
                        break;
                    }
            }
        }
        ret += pad;
    }
    if (pp == NULL)
        return (ret);
    p = *pp;

    if (pad)
        *(p++) = pb;
    if (a->length == 0)
        *(p++) = 0;
    else if (!neg)
        OPENSSL_memcpy(p, a->data, (unsigned int)a->length);
    else {
        /* Begin at the end of the encoding */
        n = a->data + a->length - 1;
        p += a->length - 1;
        i = a->length;
        /* Copy zeros to destination as long as source is zero */
        while (!*n && i > 1) {
            *(p--) = 0;
            n--;
            i--;
        }
        /* Complement and increment next octet */
        *(p--) = ((*(n--)) ^ 0xff) + 1;
        i--;
        /* Complement any octets left */
        for (; i > 0; i--)
            *(p--) = *(n--) ^ 0xff;
    }

    *pp += ret;
    return (ret);
}
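/*
 * Illustrative usage sketch (not part of the library): the usual i2c
 * two-pass pattern. A NULL output pointer sizes the content encoding; the
 * second call writes it and advances |p| past the output. The helper name
 * is hypothetical, and the caller frees |buf| with OPENSSL_free.
 */
static unsigned char *example_encode_integer(ASN1_INTEGER *val, int *out_len)
{
    int len = i2c_ASN1_INTEGER(val, NULL);
    if (len <= 0)
        return NULL;
    unsigned char *buf = OPENSSL_malloc(len);
    if (buf == NULL)
        return NULL;
    unsigned char *p = buf;
    *out_len = i2c_ASN1_INTEGER(val, &p);
    return buf;
}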
static int b64_read(BIO *b, char *out, int outl) {
  int ret = 0, i, ii, j, k, x, n, num, ret_code = 0;
  BIO_B64_CTX *ctx;
  uint8_t *p, *q;

  if (out == NULL) {
    return 0;
  }

  ctx = (BIO_B64_CTX *)b->ptr;

  if (ctx == NULL || b->next_bio == NULL) {
    return 0;
  }

  BIO_clear_retry_flags(b);

  if (ctx->encode != B64_DECODE) {
    ctx->encode = B64_DECODE;
    ctx->buf_len = 0;
    ctx->buf_off = 0;
    ctx->tmp_len = 0;
    EVP_DecodeInit(&ctx->base64);
  }

  // First check if there are buffered bytes already decoded/encoded.
  if (ctx->buf_len > 0) {
    assert(ctx->buf_len >= ctx->buf_off);
    i = ctx->buf_len - ctx->buf_off;
    if (i > outl) {
      i = outl;
    }
    assert(ctx->buf_off + i < (int)sizeof(ctx->buf));
    OPENSSL_memcpy(out, &ctx->buf[ctx->buf_off], i);
    ret = i;
    out += i;
    outl -= i;
    ctx->buf_off += i;
    if (ctx->buf_len == ctx->buf_off) {
      ctx->buf_len = 0;
      ctx->buf_off = 0;
    }
  }

  // At this point, we have room of outl bytes and an empty buffer, so we
  // should read in some more.

  ret_code = 0;
  while (outl > 0) {
    if (ctx->cont <= 0) {
      break;
    }

    i = BIO_read(b->next_bio, &(ctx->tmp[ctx->tmp_len]),
                 B64_BLOCK_SIZE - ctx->tmp_len);

    if (i <= 0) {
      ret_code = i;

      // Should we continue next time we are called?
      if (!BIO_should_retry(b->next_bio)) {
        ctx->cont = i;
        // If buffer empty break
        if (ctx->tmp_len == 0) {
          break;
        } else {
          // Fall through and process what we have
          i = 0;
        }
      } else {
        // else we retry and add more data to buffer
        break;
      }
    }
    i += ctx->tmp_len;
    ctx->tmp_len = i;

    // We need to scan a line at a time until we have a valid line if we are
    // starting.
    if (ctx->start && (BIO_test_flags(b, BIO_FLAGS_BASE64_NO_NL))) {
      // ctx->start = 1;
      ctx->tmp_len = 0;
    } else if (ctx->start) {
      q = p = (uint8_t *)ctx->tmp;
      num = 0;
      for (j = 0; j < i; j++) {
        if (*(q++) != '\n') {
          continue;
        }

        // due to a previous very long line, we need to keep on scanning for a
        // '\n' before we even start looking for base64 encoded stuff.
        if (ctx->tmp_nl) {
          p = q;
          ctx->tmp_nl = 0;
          continue;
        }

        k = EVP_DecodeUpdate(&(ctx->base64), (uint8_t *)ctx->buf, &num, p,
                             q - p);

        if (k <= 0 && num == 0 && ctx->start) {
          EVP_DecodeInit(&ctx->base64);
        } else {
          if (p != (uint8_t *)&(ctx->tmp[0])) {
            i -= (p - (uint8_t *)&(ctx->tmp[0]));
            for (x = 0; x < i; x++) {
              ctx->tmp[x] = p[x];
            }
          }
          EVP_DecodeInit(&ctx->base64);
          ctx->start = 0;
          break;
        }
        p = q;
      }

      // we fell off the end without starting
      if (j == i && num == 0) {
        // Is this one long chunk? If so, keep on reading until a new line.
        if (p == (uint8_t *)&(ctx->tmp[0])) {
          // Check buffer full
          if (i == B64_BLOCK_SIZE) {
            ctx->tmp_nl = 1;
            ctx->tmp_len = 0;
          }
        } else if (p != q) {  // finished on a '\n'
          n = q - p;
          for (ii = 0; ii < n; ii++) {
            ctx->tmp[ii] = p[ii];
          }
          ctx->tmp_len = n;
        }
        // else finished on a '\n'
        continue;
      } else {
        ctx->tmp_len = 0;
      }
    } else if (i < B64_BLOCK_SIZE && ctx->cont > 0) {
      // If buffer isn't full and we can retry then restart to read in more
      // data.
      continue;
    }

    if (BIO_test_flags(b, BIO_FLAGS_BASE64_NO_NL)) {
      int z, jj;

      jj = i & ~3;  // process per 4
      z = EVP_DecodeBlock((uint8_t *)ctx->buf, (uint8_t *)ctx->tmp, jj);
      if (jj > 2) {
        if (ctx->tmp[jj - 1] == '=') {
          z--;
          if (ctx->tmp[jj - 2] == '=') {
            z--;
          }
        }
      }

      // z is now number of output bytes and jj is the number consumed.
      if (jj != i) {
        OPENSSL_memmove(ctx->tmp, &ctx->tmp[jj], i - jj);
        ctx->tmp_len = i - jj;
      }
      ctx->buf_len = 0;
      if (z > 0) {
        ctx->buf_len = z;
      }
      i = z;
    } else {
      i = EVP_DecodeUpdate(&(ctx->base64), (uint8_t *)ctx->buf, &ctx->buf_len,
                           (uint8_t *)ctx->tmp, i);
      ctx->tmp_len = 0;
    }
    ctx->buf_off = 0;
    if (i < 0) {
      ret_code = 0;
      ctx->buf_len = 0;
      break;
    }

    if (ctx->buf_len <= outl) {
      i = ctx->buf_len;
    } else {
      i = outl;
    }

    OPENSSL_memcpy(out, ctx->buf, i);
    ret += i;
    ctx->buf_off = i;
    if (ctx->buf_off == ctx->buf_len) {
      ctx->buf_len = 0;
      ctx->buf_off = 0;
    }
    outl -= i;
    out += i;
  }

  BIO_copy_next_retry(b);
  return ret == 0 ? ret_code : ret;
}
ASN1_INTEGER *c2i_ASN1_INTEGER(ASN1_INTEGER **a, const unsigned char **pp,
                               long len)
{
    ASN1_INTEGER *ret = NULL;
    const unsigned char *p, *pend;
    unsigned char *to, *s;
    int i;

    if ((a == NULL) || ((*a) == NULL)) {
        if ((ret = M_ASN1_INTEGER_new()) == NULL)
            return (NULL);
        ret->type = V_ASN1_INTEGER;
    } else
        ret = (*a);

    p = *pp;
    pend = p + len;

    /*
     * We must OPENSSL_malloc stuff, even for 0 bytes otherwise it signifies
     * a missing NULL parameter.
     */
    s = (unsigned char *)OPENSSL_malloc((int)len + 1);
    if (s == NULL) {
        i = ERR_R_MALLOC_FAILURE;
        goto err;
    }
    to = s;
    if (!len) {
        /*
         * Strictly speaking this is an illegal INTEGER but we tolerate it.
         */
        ret->type = V_ASN1_INTEGER;
    } else if (*p & 0x80) {     /* a negative number */
        ret->type = V_ASN1_NEG_INTEGER;
        if ((*p == 0xff) && (len != 1)) {
            p++;
            len--;
        }
        i = len;
        p += i - 1;
        to += i - 1;
        while ((!*p) && i) {
            *(to--) = 0;
            i--;
            p--;
        }
        /*
         * Special case: if all zeros then the number will be of the form FF
         * followed by n zero bytes: this corresponds to 1 followed by n zero
         * bytes. We've already written n zeros so we just append an extra
         * one and set the first byte to a 1. This is treated separately
         * because it is the only case where the number of bytes is larger
         * than len.
         */
        if (!i) {
            *s = 1;
            s[len] = 0;
            len++;
        } else {
            *(to--) = (*(p--) ^ 0xff) + 1;
            i--;
            for (; i > 0; i--)
                *(to--) = *(p--) ^ 0xff;
        }
    } else {
        ret->type = V_ASN1_INTEGER;
        if ((*p == 0) && (len != 1)) {
            p++;
            len--;
        }
        OPENSSL_memcpy(s, p, (int)len);
    }

    if (ret->data != NULL)
        OPENSSL_free(ret->data);
    ret->data = s;
    ret->length = (int)len;
    if (a != NULL)
        (*a) = ret;
    *pp = pend;
    return (ret);
 err:
    OPENSSL_PUT_ERROR(ASN1, i);
    if ((ret != NULL) && ((a == NULL) || (*a != ret)))
        M_ASN1_INTEGER_free(ret);
    return (NULL);
}
static int b64_write(BIO *b, const char *in, int inl) {
  int ret = 0, n, i;
  BIO_B64_CTX *ctx;

  ctx = (BIO_B64_CTX *)b->ptr;
  BIO_clear_retry_flags(b);

  if (ctx->encode != B64_ENCODE) {
    ctx->encode = B64_ENCODE;
    ctx->buf_len = 0;
    ctx->buf_off = 0;
    ctx->tmp_len = 0;
    EVP_EncodeInit(&(ctx->base64));
  }

  assert(ctx->buf_off < (int)sizeof(ctx->buf));
  assert(ctx->buf_len <= (int)sizeof(ctx->buf));
  assert(ctx->buf_len >= ctx->buf_off);

  n = ctx->buf_len - ctx->buf_off;
  while (n > 0) {
    i = BIO_write(b->next_bio, &(ctx->buf[ctx->buf_off]), n);
    if (i <= 0) {
      BIO_copy_next_retry(b);
      return i;
    }
    assert(i <= n);
    ctx->buf_off += i;
    assert(ctx->buf_off <= (int)sizeof(ctx->buf));
    assert(ctx->buf_len >= ctx->buf_off);
    n -= i;
  }

  // at this point all pending data has been written.
  ctx->buf_off = 0;
  ctx->buf_len = 0;

  if (in == NULL || inl <= 0) {
    return 0;
  }

  while (inl > 0) {
    n = (inl > B64_BLOCK_SIZE) ? B64_BLOCK_SIZE : inl;

    if (BIO_test_flags(b, BIO_FLAGS_BASE64_NO_NL)) {
      if (ctx->tmp_len > 0) {
        assert(ctx->tmp_len <= 3);
        n = 3 - ctx->tmp_len;
        // There's a theoretical possibility of this.
        if (n > inl) {
          n = inl;
        }
        OPENSSL_memcpy(&(ctx->tmp[ctx->tmp_len]), in, n);
        ctx->tmp_len += n;
        ret += n;
        if (ctx->tmp_len < 3) {
          break;
        }
        ctx->buf_len = EVP_EncodeBlock((uint8_t *)ctx->buf,
                                       (uint8_t *)ctx->tmp, ctx->tmp_len);
        assert(ctx->buf_len <= (int)sizeof(ctx->buf));
        assert(ctx->buf_len >= ctx->buf_off);

        // Since we're now done using the temporary buffer, the length should
        // be zeroed.
        ctx->tmp_len = 0;
      } else {
        if (n < 3) {
          OPENSSL_memcpy(ctx->tmp, in, n);
          ctx->tmp_len = n;
          ret += n;
          break;
        }
        n -= n % 3;
        ctx->buf_len =
            EVP_EncodeBlock((uint8_t *)ctx->buf, (const uint8_t *)in, n);
        assert(ctx->buf_len <= (int)sizeof(ctx->buf));
        assert(ctx->buf_len >= ctx->buf_off);
        ret += n;
      }
    } else {
      EVP_EncodeUpdate(&(ctx->base64), (uint8_t *)ctx->buf, &ctx->buf_len,
                       (uint8_t *)in, n);
      assert(ctx->buf_len <= (int)sizeof(ctx->buf));
      assert(ctx->buf_len >= ctx->buf_off);
      ret += n;
    }

    inl -= n;
    in += n;

    ctx->buf_off = 0;
    n = ctx->buf_len;
    while (n > 0) {
      i = BIO_write(b->next_bio, &(ctx->buf[ctx->buf_off]), n);
      if (i <= 0) {
        BIO_copy_next_retry(b);
        return ret == 0 ? i : ret;
      }
      assert(i <= n);
      n -= i;
      ctx->buf_off += i;
      assert(ctx->buf_off <= (int)sizeof(ctx->buf));
      assert(ctx->buf_len >= ctx->buf_off);
    }
    ctx->buf_len = 0;
    ctx->buf_off = 0;
  }
  return ret;
}
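// Illustrative usage sketch (not part of the library): stacking a base64
// filter BIO on a memory BIO so that b64_write above encodes on the fly. The
// helper name is hypothetical; the caller frees |*out| with BUF_MEM_free,
// which BIO_set_close(mem, BIO_NOCLOSE) keeps alive past the free below.
static int example_b64_encode_to_mem(const uint8_t *in, int in_len,
                                     BUF_MEM **out) {
  BIO *b64 = BIO_new(BIO_f_base64());
  BIO *mem = BIO_new(BIO_s_mem());
  if (b64 == NULL || mem == NULL) {
    BIO_free(b64);
    BIO_free(mem);
    return 0;
  }
  BIO_push(b64, mem);  // writes to |b64| are encoded, then land in |mem|
  if (BIO_write(b64, in, in_len) != in_len || BIO_flush(b64) != 1) {
    BIO_free_all(b64);
    return 0;
  }
  BIO_get_mem_ptr(mem, out);
  BIO_set_close(mem, BIO_NOCLOSE);  // detach |*out| from |mem|
  BIO_free_all(b64);                // frees |b64| and |mem|
  return 1;
}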
char *X509_NAME_oneline(X509_NAME *a, char *buf, int len)
{
    X509_NAME_ENTRY *ne;
    size_t i;
    int n, lold, l, l1, l2, num, j, type;
    const char *s;
    char *p;
    unsigned char *q;
    BUF_MEM *b = NULL;
    static const char hex[17] = "0123456789ABCDEF";
    int gs_doit[4];
    char tmp_buf[80];

    if (buf == NULL) {
        if ((b = BUF_MEM_new()) == NULL)
            goto err;
        if (!BUF_MEM_grow(b, 200))
            goto err;
        b->data[0] = '\0';
        len = 200;
    } else if (len <= 0) {
        return NULL;
    }
    if (a == NULL) {
        if (b) {
            buf = b->data;
            OPENSSL_free(b);
        }
        strncpy(buf, "NO X509_NAME", len);
        buf[len - 1] = '\0';
        return buf;
    }

    len--;                      /* space for '\0' */
    l = 0;
    for (i = 0; i < sk_X509_NAME_ENTRY_num(a->entries); i++) {
        ne = sk_X509_NAME_ENTRY_value(a->entries, i);
        n = OBJ_obj2nid(ne->object);
        if ((n == NID_undef) || ((s = OBJ_nid2sn(n)) == NULL)) {
            i2t_ASN1_OBJECT(tmp_buf, sizeof(tmp_buf), ne->object);
            s = tmp_buf;
        }
        l1 = strlen(s);

        type = ne->value->type;
        num = ne->value->length;
        if (num > NAME_ONELINE_MAX) {
            OPENSSL_PUT_ERROR(X509, X509_R_NAME_TOO_LONG);
            goto end;
        }
        q = ne->value->data;

        if ((type == V_ASN1_GENERALSTRING) && ((num % 4) == 0)) {
            gs_doit[0] = gs_doit[1] = gs_doit[2] = gs_doit[3] = 0;
            for (j = 0; j < num; j++)
                if (q[j] != 0)
                    gs_doit[j & 3] = 1;

            if (gs_doit[0] | gs_doit[1] | gs_doit[2])
                gs_doit[0] = gs_doit[1] = gs_doit[2] = gs_doit[3] = 1;
            else {
                gs_doit[0] = gs_doit[1] = gs_doit[2] = 0;
                gs_doit[3] = 1;
            }
        } else
            gs_doit[0] = gs_doit[1] = gs_doit[2] = gs_doit[3] = 1;

        for (l2 = j = 0; j < num; j++) {
            if (!gs_doit[j & 3])
                continue;
            l2++;
            if ((q[j] < ' ') || (q[j] > '~'))
                l2 += 3;
        }

        lold = l;
        l += 1 + l1 + 1 + l2;
        if (l > NAME_ONELINE_MAX) {
            OPENSSL_PUT_ERROR(X509, X509_R_NAME_TOO_LONG);
            goto end;
        }
        if (b != NULL) {
            if (!BUF_MEM_grow(b, l + 1))
                goto err;
            p = &(b->data[lold]);
        } else if (l > len) {
            break;
        } else
            p = &(buf[lold]);
        *(p++) = '/';
        OPENSSL_memcpy(p, s, (unsigned int)l1);
        p += l1;
        *(p++) = '=';

        q = ne->value->data;

        for (j = 0; j < num; j++) {
            if (!gs_doit[j & 3])
                continue;
            n = q[j];
            if ((n < ' ') || (n > '~')) {
                *(p++) = '\\';
                *(p++) = 'x';
                *(p++) = hex[(n >> 4) & 0x0f];
                *(p++) = hex[n & 0x0f];
            } else
                *(p++) = n;
        }
        *p = '\0';
    }
    if (b != NULL) {
        p = b->data;
        OPENSSL_free(b);
    } else
        p = buf;
    if (i == 0)
        *p = '\0';
    return (p);
 err:
    OPENSSL_PUT_ERROR(X509, ERR_R_MALLOC_FAILURE);
 end:
    BUF_MEM_free(b);
    return (NULL);
}
static int chacha20_poly1305_seal_scatter(
    const uint8_t *key, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len,
    size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len,
    const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len, size_t tag_len) {
  if (extra_in_len + tag_len < tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  if (max_out_tag_len < tag_len + extra_in_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (nonce_len != 12) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  // |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow
  // individual operations that work on more than 256GB at a time.
  // |in_len_64| is needed because, on 32-bit platforms, size_t is only
  // 32-bits and this produces a warning because it's always false.
  // Casting to uint64_t inside the conditional is not sufficient to stop
  // the warning.
  const uint64_t in_len_64 = in_len;
  if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_tag_len < tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  // If extra input is given, it is expected to be very short and so is
  // encrypted byte-by-byte first.
  if (extra_in_len) {
    static const size_t kChaChaBlockSize = 64;
    uint32_t block_counter = 1 + (in_len / kChaChaBlockSize);
    size_t offset = in_len % kChaChaBlockSize;
    uint8_t block[64 /* kChaChaBlockSize */];

    for (size_t done = 0; done < extra_in_len; block_counter++) {
      memset(block, 0, sizeof(block));
      CRYPTO_chacha_20(block, block, sizeof(block), key, nonce,
                       block_counter);
      for (size_t i = offset; i < sizeof(block) && done < extra_in_len;
           i++, done++) {
        out_tag[done] = extra_in[done] ^ block[i];
      }
      offset = 0;
    }
  }

  union seal_data data;
  if (asm_capable()) {
    OPENSSL_memcpy(data.in.key, key, 32);
    data.in.counter = 0;
    OPENSSL_memcpy(data.in.nonce, nonce, 12);
    data.in.extra_ciphertext = out_tag;
    data.in.extra_ciphertext_len = extra_in_len;
    chacha20_poly1305_seal(out, in, in_len, ad, ad_len, &data);
  } else {
    CRYPTO_chacha_20(out, in, in_len, key, nonce, 1);
    calc_tag(data.out.tag, key, nonce, ad, ad_len, out, in_len, out_tag,
             extra_in_len);
  }

  OPENSSL_memcpy(out_tag + extra_in_len, data.out.tag, tag_len);
  *out_tag_len = extra_in_len + tag_len;
  return 1;
}
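// Illustrative usage sketch (not part of the library): driving the scatter
// path above through the public EVP_AEAD interface, which returns the
// ciphertext and tag separately. The helper name, key, and nonce contents
// are hypothetical.
static int example_chacha20_poly1305_seal_scatter(
    const uint8_t key[32], const uint8_t nonce[12], const uint8_t *in,
    size_t in_len, uint8_t *out /* in_len bytes */,
    uint8_t out_tag[EVP_AEAD_MAX_OVERHEAD], size_t *out_tag_len) {
  EVP_AEAD_CTX ctx;
  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_chacha20_poly1305(), key, 32,
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;
  }
  int ok = EVP_AEAD_CTX_seal_scatter(&ctx, out, out_tag, out_tag_len,
                                     EVP_AEAD_MAX_OVERHEAD, nonce, 12, in,
                                     in_len, NULL /* extra_in */, 0,
                                     NULL /* ad */, 0);
  EVP_AEAD_CTX_cleanup(&ctx);
  return ok;
}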
int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
                              size_t *md_out_size, const uint8_t header[13],
                              const uint8_t *data, size_t data_plus_mac_size,
                              size_t data_plus_mac_plus_padding_size,
                              const uint8_t *mac_secret,
                              unsigned mac_secret_length) {
  HASH_CTX md_state;
  void (*md_final_raw)(HASH_CTX *ctx, uint8_t *md_out);
  void (*md_transform)(HASH_CTX *ctx, const uint8_t *block);
  unsigned md_size, md_block_size = 64;
  /* md_length_size is the number of bytes in the length field that terminates
   * the hash. */
  unsigned md_length_size = 8;

  /* Bound the acceptable input so we can forget about many possible overflows
   * later in this function. This is redundant with the record size limits in
   * TLS. */
  if (data_plus_mac_plus_padding_size >= 1024 * 1024) {
    assert(0);
    return 0;
  }

  switch (EVP_MD_type(md)) {
    case NID_sha1:
      SHA1_Init(&md_state.sha1);
      md_final_raw = tls1_sha1_final_raw;
      md_transform = tls1_sha1_transform;
      md_size = SHA_DIGEST_LENGTH;
      break;

    case NID_sha256:
      SHA256_Init(&md_state.sha256);
      md_final_raw = tls1_sha256_final_raw;
      md_transform = tls1_sha256_transform;
      md_size = SHA256_DIGEST_LENGTH;
      break;

    case NID_sha384:
      SHA384_Init(&md_state.sha512);
      md_final_raw = tls1_sha512_final_raw;
      md_transform = tls1_sha512_transform;
      md_size = SHA384_DIGEST_LENGTH;
      md_block_size = 128;
      md_length_size = 16;
      break;

    default:
      /* EVP_tls_cbc_record_digest_supported should have been called first to
       * check that the hash function is supported. */
      assert(0);
      *md_out_size = 0;
      return 0;
  }

  assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES);
  assert(md_block_size <= MAX_HASH_BLOCK_SIZE);
  assert(md_size <= EVP_MAX_MD_SIZE);

  static const size_t kHeaderLength = 13;

  /* kVarianceBlocks is the number of blocks of the hash that we have to
   * calculate in constant time because they could be altered by the
   * padding value.
   *
   * TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
   * required to be minimal. Therefore we say that the final six blocks
   * can vary based on the padding. */
  static const size_t kVarianceBlocks = 6;

  /* From now on we're dealing with the MAC, which conceptually has 13
   * bytes of `header' before the start of the data. */
  size_t len = data_plus_mac_plus_padding_size + kHeaderLength;
  /* max_mac_bytes contains the maximum bytes of bytes in the MAC, including
   * |header|, assuming that there's no padding. */
  size_t max_mac_bytes = len - md_size - 1;
  /* num_blocks is the maximum number of hash blocks. */
  size_t num_blocks =
      (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size;
  /* In order to calculate the MAC in constant time we have to handle
   * the final blocks specially because the padding value could cause the
   * end to appear somewhere in the final |kVarianceBlocks| blocks and we
   * can't leak where. However, |num_starting_blocks| worth of data can
   * be hashed right away because no padding value can affect whether
   * they are plaintext. */
  size_t num_starting_blocks = 0;
  /* k is the starting byte offset into the conceptual header||data where
   * we start processing. */
  size_t k = 0;
  /* mac_end_offset is the index just past the end of the data to be
   * MACed. */
  size_t mac_end_offset = data_plus_mac_size + kHeaderLength - md_size;
  /* c is the index of the 0x80 byte in the final hash block that
   * contains application data. */
  size_t c = mac_end_offset % md_block_size;
  /* index_a is the hash block number that contains the 0x80 terminating
   * value. */
  size_t index_a = mac_end_offset / md_block_size;
  /* index_b is the hash block number that contains the 64-bit hash
   * length, in bits. */
  size_t index_b = (mac_end_offset + md_length_size) / md_block_size;

  if (num_blocks > kVarianceBlocks) {
    num_starting_blocks = num_blocks - kVarianceBlocks;
    k = md_block_size * num_starting_blocks;
  }

  /* bits is the hash-length in bits. It includes the additional hash
   * block for the masked HMAC key. */
  size_t bits = 8 * mac_end_offset; /* at most 18 bits to represent */

  /* Compute the initial HMAC block. */
  bits += 8 * md_block_size;
  /* hmac_pad is the masked HMAC key. */
  uint8_t hmac_pad[MAX_HASH_BLOCK_SIZE];
  OPENSSL_memset(hmac_pad, 0, md_block_size);
  assert(mac_secret_length <= sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < md_block_size; i++) {
    hmac_pad[i] ^= 0x36;
  }

  md_transform(&md_state, hmac_pad);

  /* The length check means |bits| fits in four bytes. */
  uint8_t length_bytes[MAX_HASH_BIT_COUNT_BYTES];
  OPENSSL_memset(length_bytes, 0, md_length_size - 4);
  length_bytes[md_length_size - 4] = (uint8_t)(bits >> 24);
  length_bytes[md_length_size - 3] = (uint8_t)(bits >> 16);
  length_bytes[md_length_size - 2] = (uint8_t)(bits >> 8);
  length_bytes[md_length_size - 1] = (uint8_t)bits;

  if (k > 0) {
    /* k is a multiple of md_block_size. */
    uint8_t first_block[MAX_HASH_BLOCK_SIZE];
    OPENSSL_memcpy(first_block, header, 13);
    OPENSSL_memcpy(first_block + 13, data, md_block_size - 13);
    md_transform(&md_state, first_block);
    for (size_t i = 1; i < k / md_block_size; i++) {
      md_transform(&md_state, data + md_block_size * i - 13);
    }
  }

  uint8_t mac_out[EVP_MAX_MD_SIZE];
  OPENSSL_memset(mac_out, 0, sizeof(mac_out));

  /* We now process the final hash blocks. For each block, we construct
   * it in constant time. If |i == index_a| then we'll include the 0x80
   * bytes and zero pad etc. For each block we selectively copy it, in
   * constant time, to |mac_out|. */
  for (size_t i = num_starting_blocks;
       i <= num_starting_blocks + kVarianceBlocks; i++) {
    uint8_t block[MAX_HASH_BLOCK_SIZE];
    uint8_t is_block_a = constant_time_eq_8(i, index_a);
    uint8_t is_block_b = constant_time_eq_8(i, index_b);
    for (size_t j = 0; j < md_block_size; j++) {
      uint8_t b = 0;
      if (k < kHeaderLength) {
        b = header[k];
      } else if (k < data_plus_mac_plus_padding_size + kHeaderLength) {
        b = data[k - kHeaderLength];
      }
      k++;

      uint8_t is_past_c = is_block_a & constant_time_ge_8(j, c);
      uint8_t is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1);
      /* If this is the block containing the end of the
       * application data, and we are at the offset for the
       * 0x80 value, then overwrite b with 0x80. */
      b = constant_time_select_8(is_past_c, 0x80, b);
      /* If this is the block containing the end of the
       * application data and we're past the 0x80 value then
       * just write zero. */
      b = b & ~is_past_cp1;
      /* If this is index_b (the final block), but not
       * index_a (the end of the data), then the 64-bit
       * length didn't fit into index_a and we're having to
       * add an extra block of zeros. */
      b &= ~is_block_b | is_block_a;

      /* The final bytes of one of the blocks contains the
       * length. */
      if (j >= md_block_size - md_length_size) {
        /* If this is index_b, write a length byte. */
        b = constant_time_select_8(
            is_block_b, length_bytes[j - (md_block_size - md_length_size)], b);
      }
      block[j] = b;
    }

    md_transform(&md_state, block);
    md_final_raw(&md_state, block);
    /* If this is index_b, copy the hash value to |mac_out|. */
    for (size_t j = 0; j < md_size; j++) {
      mac_out[j] |= block[j] & is_block_b;
    }
  }

  EVP_MD_CTX md_ctx;
  EVP_MD_CTX_init(&md_ctx);
  if (!EVP_DigestInit_ex(&md_ctx, md, NULL /* engine */)) {
    EVP_MD_CTX_cleanup(&md_ctx);
    return 0;
  }

  /* Complete the HMAC in the standard manner. */
  for (size_t i = 0; i < md_block_size; i++) {
    hmac_pad[i] ^= 0x6a;
  }
  EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size);
  EVP_DigestUpdate(&md_ctx, mac_out, md_size);

  unsigned md_out_size_u;
  EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u);
  *md_out_size = md_out_size_u;
  EVP_MD_CTX_cleanup(&md_ctx);

  return 1;
}
SSL_SESSION *SSL_SESSION_dup(SSL_SESSION *session, int dup_flags) {
  SSL_SESSION *new_session = ssl_session_new(session->x509_method);
  if (new_session == NULL) {
    goto err;
  }

  new_session->is_server = session->is_server;
  new_session->ssl_version = session->ssl_version;
  new_session->sid_ctx_length = session->sid_ctx_length;
  OPENSSL_memcpy(new_session->sid_ctx, session->sid_ctx,
                 session->sid_ctx_length);

  /* Copy the key material. */
  new_session->master_key_length = session->master_key_length;
  OPENSSL_memcpy(new_session->master_key, session->master_key,
                 session->master_key_length);
  new_session->cipher = session->cipher;

  /* Copy authentication state. */
  if (session->psk_identity != NULL) {
    new_session->psk_identity = BUF_strdup(session->psk_identity);
    if (new_session->psk_identity == NULL) {
      goto err;
    }
  }
  if (session->certs != NULL) {
    new_session->certs = sk_CRYPTO_BUFFER_new_null();
    if (new_session->certs == NULL) {
      goto err;
    }
    for (size_t i = 0; i < sk_CRYPTO_BUFFER_num(session->certs); i++) {
      CRYPTO_BUFFER *buffer = sk_CRYPTO_BUFFER_value(session->certs, i);
      if (!sk_CRYPTO_BUFFER_push(new_session->certs, buffer)) {
        goto err;
      }
      CRYPTO_BUFFER_up_ref(buffer);
    }
  }

  if (!session->x509_method->session_dup(new_session, session)) {
    goto err;
  }

  new_session->verify_result = session->verify_result;

  new_session->ocsp_response_length = session->ocsp_response_length;
  if (session->ocsp_response != NULL) {
    new_session->ocsp_response = BUF_memdup(session->ocsp_response,
                                            session->ocsp_response_length);
    if (new_session->ocsp_response == NULL) {
      goto err;
    }
  }

  new_session->tlsext_signed_cert_timestamp_list_length =
      session->tlsext_signed_cert_timestamp_list_length;
  if (session->tlsext_signed_cert_timestamp_list != NULL) {
    new_session->tlsext_signed_cert_timestamp_list =
        BUF_memdup(session->tlsext_signed_cert_timestamp_list,
                   session->tlsext_signed_cert_timestamp_list_length);
    if (new_session->tlsext_signed_cert_timestamp_list == NULL) {
      goto err;
    }
  }

  OPENSSL_memcpy(new_session->peer_sha256, session->peer_sha256,
                 SHA256_DIGEST_LENGTH);
  new_session->peer_sha256_valid = session->peer_sha256_valid;

  if (session->tlsext_hostname != NULL) {
    new_session->tlsext_hostname = BUF_strdup(session->tlsext_hostname);
    if (new_session->tlsext_hostname == NULL) {
      goto err;
    }
  }

  new_session->peer_signature_algorithm = session->peer_signature_algorithm;

  new_session->timeout = session->timeout;
  new_session->auth_timeout = session->auth_timeout;
  new_session->time = session->time;

  /* Copy non-authentication connection properties. */
  if (dup_flags & SSL_SESSION_INCLUDE_NONAUTH) {
    new_session->session_id_length = session->session_id_length;
    OPENSSL_memcpy(new_session->session_id, session->session_id,
                   session->session_id_length);

    new_session->group_id = session->group_id;

    OPENSSL_memcpy(new_session->original_handshake_hash,
                   session->original_handshake_hash,
                   session->original_handshake_hash_len);
    new_session->original_handshake_hash_len =
        session->original_handshake_hash_len;
    new_session->tlsext_tick_lifetime_hint =
        session->tlsext_tick_lifetime_hint;
    new_session->ticket_age_add = session->ticket_age_add;
    new_session->ticket_max_early_data = session->ticket_max_early_data;
    new_session->extended_master_secret = session->extended_master_secret;

    if (session->early_alpn != NULL) {
      new_session->early_alpn =
          BUF_memdup(session->early_alpn, session->early_alpn_len);
      if (new_session->early_alpn == NULL) {
        goto err;
      }
    }
    new_session->early_alpn_len = session->early_alpn_len;
  }

  /* Copy the ticket. */
  if (dup_flags & SSL_SESSION_INCLUDE_TICKET) {
    if (session->tlsext_tick != NULL) {
      new_session->tlsext_tick =
          BUF_memdup(session->tlsext_tick, session->tlsext_ticklen);
      if (new_session->tlsext_tick == NULL) {
        goto err;
      }
    }
    new_session->tlsext_ticklen = session->tlsext_ticklen;
  }

  /* The new_session does not get a copy of the ex_data. */

  new_session->not_resumable = 1;
  return new_session;

err:
  SSL_SESSION_free(new_session);
  OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE);
  return NULL;
}
static int ssl3_prf(uint8_t *out, size_t out_len, const uint8_t *secret,
                    size_t secret_len, const char *label, size_t label_len,
                    const uint8_t *seed1, size_t seed1_len,
                    const uint8_t *seed2, size_t seed2_len) {
  EVP_MD_CTX md5;
  EVP_MD_CTX sha1;
  uint8_t buf[16], smd[SHA_DIGEST_LENGTH];
  uint8_t c = 'A';
  size_t i, j, k;

  k = 0;
  EVP_MD_CTX_init(&md5);
  EVP_MD_CTX_init(&sha1);
  for (i = 0; i < out_len; i += MD5_DIGEST_LENGTH) {
    k++;
    if (k > sizeof(buf)) {
      /* bug: 'buf' is too small for this ciphersuite */
      OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
      return 0;
    }

    for (j = 0; j < k; j++) {
      buf[j] = c;
    }
    c++;
    if (!EVP_DigestInit_ex(&sha1, EVP_sha1(), NULL)) {
      OPENSSL_PUT_ERROR(SSL, ERR_LIB_EVP);
      return 0;
    }
    EVP_DigestUpdate(&sha1, buf, k);
    EVP_DigestUpdate(&sha1, secret, secret_len);
    /* |label| is ignored for SSLv3. */
    if (seed1_len) {
      EVP_DigestUpdate(&sha1, seed1, seed1_len);
    }
    if (seed2_len) {
      EVP_DigestUpdate(&sha1, seed2, seed2_len);
    }
    EVP_DigestFinal_ex(&sha1, smd, NULL);

    if (!EVP_DigestInit_ex(&md5, EVP_md5(), NULL)) {
      OPENSSL_PUT_ERROR(SSL, ERR_LIB_EVP);
      return 0;
    }
    EVP_DigestUpdate(&md5, secret, secret_len);
    EVP_DigestUpdate(&md5, smd, SHA_DIGEST_LENGTH);
    if (i + MD5_DIGEST_LENGTH > out_len) {
      EVP_DigestFinal_ex(&md5, smd, NULL);
      OPENSSL_memcpy(out, smd, out_len - i);
    } else {
      EVP_DigestFinal_ex(&md5, out, NULL);
    }

    out += MD5_DIGEST_LENGTH;
  }

  OPENSSL_cleanse(smd, SHA_DIGEST_LENGTH);
  EVP_MD_CTX_cleanup(&md5);
  EVP_MD_CTX_cleanup(&sha1);

  return 1;
}
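For reference, the SSLv3 PRF implemented above derives each MD5-sized block of output as

  block_i = MD5(secret || SHA1(A_i || secret || seed1 || seed2))

where A_1 = "A", A_2 = "BB", A_3 = "CCC", and so on; unlike the TLS PRF, the |label| parameter plays no role, which is why it is ignored in the loop.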
int ssl_get_new_session(SSL_HANDSHAKE *hs, int is_server) {
  SSL *const ssl = hs->ssl;
  if (ssl->mode & SSL_MODE_NO_SESSION_CREATION) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_SESSION_MAY_NOT_BE_CREATED);
    return 0;
  }

  SSL_SESSION *session = ssl_session_new(ssl->ctx->x509_method);
  if (session == NULL) {
    return 0;
  }

  session->is_server = is_server;
  session->ssl_version = ssl->version;

  /* Fill in the time from the |SSL_CTX|'s clock. */
  struct OPENSSL_timeval now;
  ssl_get_current_time(ssl, &now);
  session->time = now.tv_sec;

  uint16_t version = ssl3_protocol_version(ssl);
  if (version >= TLS1_3_VERSION) {
    /* TLS 1.3 uses tickets as authenticators, so we are willing to use them
     * for longer. */
    session->timeout = ssl->session_ctx->session_psk_dhe_timeout;
    session->auth_timeout = SSL_DEFAULT_SESSION_AUTH_TIMEOUT;
  } else {
    /* TLS 1.2 resumption does not incorporate new key material, so we use a
     * much shorter timeout. */
    session->timeout = ssl->session_ctx->session_timeout;
    session->auth_timeout = ssl->session_ctx->session_timeout;
  }

  if (is_server) {
    if (hs->ticket_expected || version >= TLS1_3_VERSION) {
      /* Don't set session IDs for sessions resumed with tickets. This will
       * keep them out of the session cache. */
      session->session_id_length = 0;
    } else {
      session->session_id_length = SSL3_SSL_SESSION_ID_LENGTH;
      if (!RAND_bytes(session->session_id, session->session_id_length)) {
        goto err;
      }
    }
  } else {
    session->session_id_length = 0;
  }

  if (ssl->cert->sid_ctx_length > sizeof(session->sid_ctx)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    goto err;
  }
  OPENSSL_memcpy(session->sid_ctx, ssl->cert->sid_ctx,
                 ssl->cert->sid_ctx_length);
  session->sid_ctx_length = ssl->cert->sid_ctx_length;

  /* The session is marked not resumable until it is completely filled in. */
  session->not_resumable = 1;
  session->verify_result = X509_V_ERR_INVALID_CALL;

  SSL_SESSION_free(hs->new_session);
  hs->new_session = session;
  ssl_set_session(ssl, NULL);
  return 1;

err:
  SSL_SESSION_free(session);
  return 0;
}
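The early-out at the top is reachable from application code; |SSL_MODE_NO_SESSION_CREATION| is a public mode bit, so a sketch of triggering it is simply:

/* With this mode set on a connection, any handshake that would need to
 * create a new session fails with SSL_R_SESSION_MAY_NOT_BE_CREATED. */
SSL_set_mode(ssl, SSL_MODE_NO_SESSION_CREATION);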
static int aead_aes_gcm_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                     uint8_t *out_tag, size_t *out_tag_len,
                                     size_t max_out_tag_len,
                                     const uint8_t *nonce, size_t nonce_len,
                                     const uint8_t *in, size_t in_len,
                                     const uint8_t *extra_in,
                                     size_t extra_in_len,
                                     const uint8_t *ad, size_t ad_len) {
  struct aead_aes_gcm_ctx *gcm_ctx = (struct aead_aes_gcm_ctx *)&ctx->state;

  if (extra_in_len + ctx->tag_len < ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  if (max_out_tag_len < extra_in_len + ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (nonce_len == 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  const AES_KEY *key = &gcm_ctx->ks.ks;

  GCM128_CONTEXT gcm;
  OPENSSL_memset(&gcm, 0, sizeof(gcm));
  OPENSSL_memcpy(&gcm.gcm_key, &gcm_ctx->gcm_key, sizeof(gcm.gcm_key));
  CRYPTO_gcm128_setiv(&gcm, key, nonce, nonce_len);

  if (ad_len > 0 && !CRYPTO_gcm128_aad(&gcm, ad, ad_len)) {
    return 0;
  }

  if (gcm_ctx->ctr) {
    if (!CRYPTO_gcm128_encrypt_ctr32(&gcm, key, in, out, in_len,
                                     gcm_ctx->ctr)) {
      return 0;
    }
  } else {
    if (!CRYPTO_gcm128_encrypt(&gcm, key, in, out, in_len)) {
      return 0;
    }
  }

  if (extra_in_len) {
    if (gcm_ctx->ctr) {
      if (!CRYPTO_gcm128_encrypt_ctr32(&gcm, key, extra_in, out_tag,
                                       extra_in_len, gcm_ctx->ctr)) {
        return 0;
      }
    } else {
      if (!CRYPTO_gcm128_encrypt(&gcm, key, extra_in, out_tag,
                                 extra_in_len)) {
        return 0;
      }
    }
  }

  CRYPTO_gcm128_tag(&gcm, out_tag + extra_in_len, ctx->tag_len);
  *out_tag_len = ctx->tag_len + extra_in_len;

  return 1;
}
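This static function sits behind the public EVP_AEAD API. A minimal sketch of driving it through EVP_AEAD_CTX_seal_scatter, assuming AES-128-GCM and a caller-managed 12-byte nonce (error details elided):

#include <openssl/aead.h>

/* A sketch: |out| must hold |pt_len| bytes (GCM does not expand the
 * ciphertext); the tag lands separately in |tag|. */
int seal_example(const uint8_t key[16], const uint8_t nonce[12],
                 const uint8_t *pt, size_t pt_len, uint8_t *out,
                 uint8_t tag[EVP_AEAD_MAX_TAG_LENGTH], size_t *tag_len) {
  EVP_AEAD_CTX ctx;
  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_128_gcm(), key, 16,
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;
  }
  int ok = EVP_AEAD_CTX_seal_scatter(&ctx, out, tag, tag_len,
                                     EVP_AEAD_MAX_TAG_LENGTH, nonce, 12,
                                     pt, pt_len, NULL, 0 /* no extra_in */,
                                     NULL, 0 /* no AD */);
  EVP_AEAD_CTX_cleanup(&ctx);
  return ok;
}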
static int ssl_encrypt_ticket_with_cipher_ctx(SSL *ssl, CBB *out,
                                              const uint8_t *session_buf,
                                              size_t session_len) {
  int ret = 0;

  EVP_CIPHER_CTX ctx;
  EVP_CIPHER_CTX_init(&ctx);
  HMAC_CTX hctx;
  HMAC_CTX_init(&hctx);

  /* If the session is too long, emit a dummy value rather than abort the
   * connection. */
  static const size_t kMaxTicketOverhead =
      16 + EVP_MAX_IV_LENGTH + EVP_MAX_BLOCK_LENGTH + EVP_MAX_MD_SIZE;
  if (session_len > 0xffff - kMaxTicketOverhead) {
    static const char kTicketPlaceholder[] = "TICKET TOO LARGE";
    if (CBB_add_bytes(out, (const uint8_t *)kTicketPlaceholder,
                      strlen(kTicketPlaceholder))) {
      ret = 1;
    }
    goto err;
  }

  /* Initialize the HMAC and cipher contexts. If the callback is present, it
   * does all the work; otherwise, use generated values from the parent
   * context. */
  SSL_CTX *tctx = ssl->session_ctx;
  uint8_t iv[EVP_MAX_IV_LENGTH];
  uint8_t key_name[16];
  if (tctx->tlsext_ticket_key_cb != NULL) {
    if (tctx->tlsext_ticket_key_cb(ssl, key_name, iv, &ctx, &hctx,
                                   1 /* encrypt */) < 0) {
      goto err;
    }
  } else {
    if (!RAND_bytes(iv, 16) ||
        !EVP_EncryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL,
                            tctx->tlsext_tick_aes_key, iv) ||
        !HMAC_Init_ex(&hctx, tctx->tlsext_tick_hmac_key, 16, tlsext_tick_md(),
                      NULL)) {
      goto err;
    }
    OPENSSL_memcpy(key_name, tctx->tlsext_tick_key_name, 16);
  }

  uint8_t *ptr;
  if (!CBB_add_bytes(out, key_name, 16) ||
      !CBB_add_bytes(out, iv, EVP_CIPHER_CTX_iv_length(&ctx)) ||
      !CBB_reserve(out, &ptr, session_len + EVP_MAX_BLOCK_LENGTH)) {
    goto err;
  }

  size_t total = 0;
#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
  OPENSSL_memcpy(ptr, session_buf, session_len);
  total = session_len;
#else
  int len;
  if (!EVP_EncryptUpdate(&ctx, ptr + total, &len, session_buf, session_len)) {
    goto err;
  }
  total += len;
  if (!EVP_EncryptFinal_ex(&ctx, ptr + total, &len)) {
    goto err;
  }
  total += len;
#endif
  if (!CBB_did_write(out, total)) {
    goto err;
  }

  unsigned hlen;
  if (!HMAC_Update(&hctx, CBB_data(out), CBB_len(out)) ||
      !CBB_reserve(out, &ptr, EVP_MAX_MD_SIZE) ||
      !HMAC_Final(&hctx, ptr, &hlen) ||
      !CBB_did_write(out, hlen)) {
    goto err;
  }

  ret = 1;

err:
  EVP_CIPHER_CTX_cleanup(&ctx);
  HMAC_CTX_cleanup(&hctx);
  return ret;
}
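The callback consulted above is installed with SSL_CTX_set_tlsext_ticket_key_cb. A hypothetical encrypt-side sketch, where |kKeyName|, |kAESKey|, and |kHMACKey| are placeholders the application would manage and rotate itself (the all-zero keys here are for illustration only):

#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>

static const uint8_t kKeyName[16] = {'s', 'k', 'e', 't', 'c', 'h'};
static const uint8_t kAESKey[16] = {0};
static const uint8_t kHMACKey[16] = {0};

/* A sketch of the callback contract: when |encrypt| is one, fill in
 * |key_name| and |iv| and initialize both contexts; return 1 on success
 * and -1 on error (the caller above treats any negative value as fatal). */
static int ticket_key_cb(SSL *ssl, uint8_t *key_name, uint8_t *iv,
                         EVP_CIPHER_CTX *ctx, HMAC_CTX *hmac_ctx,
                         int encrypt) {
  if (!encrypt) {
    return 0;  /* Decrypt path elided: look up |key_name| here. */
  }
  memcpy(key_name, kKeyName, 16);
  if (!RAND_bytes(iv, 16) ||
      !EVP_EncryptInit_ex(ctx, EVP_aes_128_cbc(), NULL, kAESKey, iv) ||
      !HMAC_Init_ex(hmac_ctx, kHMACKey, 16, EVP_sha256(), NULL)) {
    return -1;
  }
  return 1;
}

It would then be registered once at startup with SSL_CTX_set_tlsext_ticket_key_cb(ssl_ctx, ticket_key_cb).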
void CRYPTO_POLYVAL_finish(const struct polyval_ctx *ctx, uint8_t out[16]) {
  polyval_block S = ctx->S;
  byte_reverse(&S);
  OPENSSL_memcpy(out, &S.c, sizeof(polyval_block));
}
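Together with its init and update counterparts, the intended call pattern is roughly the following sketch; the CRYPTO_POLYVAL_* functions are internal helpers (used by AES-GCM-SIV), so the exact signatures are an assumption here:

/* A sketch: |key| is the 16-byte POLYVAL key and |in_len| must be a whole
 * number of 16-byte blocks. */
struct polyval_ctx ctx;
uint8_t digest[16];
CRYPTO_POLYVAL_init(&ctx, key);
CRYPTO_POLYVAL_update_blocks(&ctx, in, in_len);
CRYPTO_POLYVAL_finish(&ctx, digest);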
int ECDH_compute_key(void *out, size_t outlen, const EC_POINT *pub_key,
                     const EC_KEY *priv_key,
                     void *(*kdf)(const void *in, size_t inlen, void *out,
                                  size_t *outlen)) {
  if (priv_key->priv_key == NULL) {
    OPENSSL_PUT_ERROR(ECDH, ECDH_R_NO_PRIVATE_VALUE);
    return -1;
  }
  const EC_SCALAR *const priv = &priv_key->priv_key->scalar;

  BN_CTX *ctx = BN_CTX_new();
  if (ctx == NULL) {
    return -1;
  }
  BN_CTX_start(ctx);

  int ret = -1;
  size_t buflen = 0;
  uint8_t *buf = NULL;
  const EC_GROUP *const group = EC_KEY_get0_group(priv_key);
  EC_POINT *tmp = EC_POINT_new(group);
  if (tmp == NULL) {
    OPENSSL_PUT_ERROR(ECDH, ERR_R_MALLOC_FAILURE);
    goto err;
  }

  if (!ec_point_mul_scalar(group, tmp, NULL, pub_key, priv, ctx)) {
    OPENSSL_PUT_ERROR(ECDH, ECDH_R_POINT_ARITHMETIC_FAILURE);
    goto err;
  }

  BIGNUM *x = BN_CTX_get(ctx);
  if (!x) {
    OPENSSL_PUT_ERROR(ECDH, ERR_R_MALLOC_FAILURE);
    goto err;
  }

  if (!EC_POINT_get_affine_coordinates_GFp(group, tmp, x, NULL, ctx)) {
    OPENSSL_PUT_ERROR(ECDH, ECDH_R_POINT_ARITHMETIC_FAILURE);
    goto err;
  }

  buflen = (EC_GROUP_get_degree(group) + 7) / 8;
  buf = OPENSSL_malloc(buflen);
  if (buf == NULL) {
    OPENSSL_PUT_ERROR(ECDH, ERR_R_MALLOC_FAILURE);
    goto err;
  }

  if (!BN_bn2bin_padded(buf, buflen, x)) {
    OPENSSL_PUT_ERROR(ECDH, ERR_R_INTERNAL_ERROR);
    goto err;
  }

  if (kdf != NULL) {
    if (kdf(buf, buflen, out, &outlen) == NULL) {
      OPENSSL_PUT_ERROR(ECDH, ECDH_R_KDF_FAILED);
      goto err;
    }
  } else {
    // No KDF, so just copy as much as we can.
    if (buflen < outlen) {
      outlen = buflen;
    }
    OPENSSL_memcpy(out, buf, outlen);
  }

  if (outlen > INT_MAX) {
    OPENSSL_PUT_ERROR(ECDH, ERR_R_OVERFLOW);
    goto err;
  }
  ret = (int)outlen;

err:
  OPENSSL_free(buf);
  EC_POINT_free(tmp);
  BN_CTX_end(ctx);
  BN_CTX_free(ctx);
  return ret;
}
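A minimal end-to-end sketch of calling the function above on P-256 with no KDF (most error handling trimmed for brevity); both sides of a real exchange would derive the same X coordinate, and real callers should feed the raw secret through a KDF:

#include <openssl/ec.h>
#include <openssl/ec_key.h>
#include <openssl/ecdh.h>
#include <openssl/nid.h>

/* A sketch: generate two P-256 keys and compute one side's shared secret.
 * Returns the secret length (32 for P-256) or -1 on error. */
int ecdh_example(void) {
  EC_KEY *a = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
  EC_KEY *b = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
  uint8_t secret[32];
  int len = -1;
  if (a != NULL && b != NULL && EC_KEY_generate_key(a) &&
      EC_KEY_generate_key(b)) {
    len = ECDH_compute_key(secret, sizeof(secret),
                           EC_KEY_get0_public_key(b), a, NULL);
  }
  EC_KEY_free(a);
  EC_KEY_free(b);
  return len;
}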