int BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, CRYPTO_MUTEX *lock,
                           const BIGNUM *mod, BN_CTX *bn_ctx) {
  CRYPTO_MUTEX_lock_read(lock);
  BN_MONT_CTX *ctx = *pmont;
  CRYPTO_MUTEX_unlock_read(lock);
  if (ctx) {
    return 1;
  }

  CRYPTO_MUTEX_lock_write(lock);
  ctx = *pmont;
  if (ctx) {
    goto out;
  }

  ctx = BN_MONT_CTX_new();
  if (ctx == NULL) {
    goto out;
  }
  if (!BN_MONT_CTX_set(ctx, mod, bn_ctx)) {
    BN_MONT_CTX_free(ctx);
    ctx = NULL;
    goto out;
  }
  *pmont = ctx;

out:
  CRYPTO_MUTEX_unlock_write(lock);
  return ctx != NULL;
}
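/* A usage sketch for the function above (assumed, not taken from the
 * library): a caller lazily initializes a shared Montgomery context and then
 * uses it for modular exponentiation. |SHARED_MODULUS| and |shared_mod_exp|
 * are hypothetical names invented for illustration; CRYPTO_MUTEX and the
 * BN_MONT_CTX_set_locked prototype live in BoringSSL's internal headers, so a
 * snippet like this only compiles inside the library. */
#include <openssl/bn.h>

#include "internal.h"  /* assumed location of CRYPTO_MUTEX and the prototype */

typedef struct shared_modulus_st {
  BIGNUM *n;           /* the modulus, set once when the object is created */
  BN_MONT_CTX *mont_n; /* lazily initialized; guarded by |lock| */
  CRYPTO_MUTEX lock;   /* assumed initialized with CRYPTO_MUTEX_init */
} SHARED_MODULUS;

static int shared_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *e,
                          SHARED_MODULUS *m, BN_CTX *ctx) {
  /* Safe under concurrent callers: the first one populates |m->mont_n| while
   * holding the write lock; subsequent calls take only the read lock. */
  if (!BN_MONT_CTX_set_locked(&m->mont_n, &m->lock, m->n, ctx)) {
    return 0;
  }
  return BN_mod_exp_mont(r, a, e, m->n, ctx, m->mont_n);
}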
/* ssl_lookup_session looks up |session_id| in the session cache and sets
 * |*out_session| to an |SSL_SESSION| object if found. The caller takes
 * ownership of the result. */
static enum ssl_session_result_t ssl_lookup_session(
    SSL *ssl, SSL_SESSION **out_session, const uint8_t *session_id,
    size_t session_id_len) {
  *out_session = NULL;

  if (session_id_len == 0 || session_id_len > SSL_MAX_SSL_SESSION_ID_LENGTH) {
    return ssl_session_success;
  }

  SSL_SESSION *session;
  /* Try the internal cache, if it exists. */
  if (!(ssl->initial_ctx->session_cache_mode &
        SSL_SESS_CACHE_NO_INTERNAL_LOOKUP)) {
    SSL_SESSION data;
    data.ssl_version = ssl->version;
    data.session_id_length = session_id_len;
    memcpy(data.session_id, session_id, session_id_len);

    CRYPTO_MUTEX_lock_read(&ssl->initial_ctx->lock);
    session = lh_SSL_SESSION_retrieve(ssl->initial_ctx->sessions, &data);
    if (session != NULL) {
      SSL_SESSION_up_ref(session);
    }
    /* TODO(davidben): This should probably move it to the front of the
     * list. */
    CRYPTO_MUTEX_unlock(&ssl->initial_ctx->lock);

    if (session != NULL) {
      *out_session = session;
      return ssl_session_success;
    }
  }

  /* Fall back to the external cache, if it exists. */
  if (ssl->initial_ctx->get_session_cb == NULL) {
    return ssl_session_success;
  }
  int copy = 1;
  session = ssl->initial_ctx->get_session_cb(ssl, (uint8_t *)session_id,
                                             session_id_len, &copy);
  if (session == NULL) {
    return ssl_session_success;
  }
  if (session == SSL_magic_pending_session_ptr()) {
    return ssl_session_retry;
  }

  /* Increment reference count now if the session callback asks us to do so
   * (note that if the session structures returned by the callback are shared
   * between threads, it must handle the reference count itself [i.e. copy ==
   * 0], or things won't be thread-safe). */
  if (copy) {
    SSL_SESSION_up_ref(session);
  }

  /* Add the externally cached session to the internal cache if necessary. */
  if (!(ssl->initial_ctx->session_cache_mode &
        SSL_SESS_CACHE_NO_INTERNAL_STORE)) {
    SSL_CTX_add_session(ssl->initial_ctx, session);
  }

  *out_session = session;
  return ssl_session_success;
}
// freeze_private_key finishes initializing |rsa|'s private key components.
// After this function has returned, |rsa| may not be changed. This is needed
// because |RSA| is a public struct and, additionally, OpenSSL 1.1.0 opaquified
// it wrong (see https://github.com/openssl/openssl/issues/5158).
static int freeze_private_key(RSA *rsa, BN_CTX *ctx) {
  CRYPTO_MUTEX_lock_read(&rsa->lock);
  int frozen = rsa->private_key_frozen;
  CRYPTO_MUTEX_unlock_read(&rsa->lock);
  if (frozen) {
    return 1;
  }

  int ret = 0;
  CRYPTO_MUTEX_lock_write(&rsa->lock);
  if (rsa->private_key_frozen) {
    ret = 1;
    goto err;
  }

  // Pre-compute various intermediate values, as well as copies of private
  // exponents with correct widths. Note that other threads may concurrently
  // read from |rsa->n|, |rsa->e|, etc., so any fixes must be in separate
  // copies. We use |mont_n->N|, |mont_p->N|, and |mont_q->N| as copies of
  // |n|, |p|, and |q| with the correct minimal widths.

  if (rsa->mont_n == NULL) {
    rsa->mont_n = BN_MONT_CTX_new_for_modulus(rsa->n, ctx);
    if (rsa->mont_n == NULL) {
      goto err;
    }
  }
  const BIGNUM *n_fixed = &rsa->mont_n->N;

  // The only public upper-bound of |rsa->d| is the bit length of |rsa->n|.
  // The ASN.1 serialization of RSA private keys unfortunately leaks the byte
  // length of |rsa->d|, but normalize it so we only leak it once, rather than
  // per operation.
  if (rsa->d != NULL &&
      !ensure_fixed_copy(&rsa->d_fixed, rsa->d, n_fixed->width)) {
    goto err;
  }

  if (rsa->p != NULL && rsa->q != NULL) {
    if (rsa->mont_p == NULL) {
      rsa->mont_p = BN_MONT_CTX_new_for_modulus(rsa->p, ctx);
      if (rsa->mont_p == NULL) {
        goto err;
      }
    }
    const BIGNUM *p_fixed = &rsa->mont_p->N;

    if (rsa->mont_q == NULL) {
      rsa->mont_q = BN_MONT_CTX_new_for_modulus(rsa->q, ctx);
      if (rsa->mont_q == NULL) {
        goto err;
      }
    }
    const BIGNUM *q_fixed = &rsa->mont_q->N;

    if (rsa->dmp1 != NULL && rsa->dmq1 != NULL) {
      // Key generation relies on this function to compute |iqmp|.
      if (rsa->iqmp == NULL) {
        BIGNUM *iqmp = BN_new();
        if (iqmp == NULL ||
            !bn_mod_inverse_secret_prime(iqmp, rsa->q, rsa->p, ctx,
                                         rsa->mont_p)) {
          BN_free(iqmp);
          goto err;
        }
        rsa->iqmp = iqmp;
      }

      // CRT components are only publicly bounded by their corresponding
      // moduli's bit lengths. |rsa->iqmp| is unused outside of this one-time
      // setup, so we do not compute a fixed-width version of it.
      if (!ensure_fixed_copy(&rsa->dmp1_fixed, rsa->dmp1, p_fixed->width) ||
          !ensure_fixed_copy(&rsa->dmq1_fixed, rsa->dmq1, q_fixed->width)) {
        goto err;
      }

      // Compute |inv_small_mod_large_mont|. Note that it is always modulo the
      // larger prime, independent of what is stored in |rsa->iqmp|.
      if (rsa->inv_small_mod_large_mont == NULL) {
        BIGNUM *inv_small_mod_large_mont = BN_new();
        int ok;
        if (BN_cmp(rsa->p, rsa->q) < 0) {
          ok = inv_small_mod_large_mont != NULL &&
               bn_mod_inverse_secret_prime(inv_small_mod_large_mont, rsa->p,
                                           rsa->q, ctx, rsa->mont_q) &&
               BN_to_montgomery(inv_small_mod_large_mont,
                                inv_small_mod_large_mont, rsa->mont_q, ctx);
        } else {
          ok = inv_small_mod_large_mont != NULL &&
               BN_to_montgomery(inv_small_mod_large_mont, rsa->iqmp,
                                rsa->mont_p, ctx);
        }
        if (!ok) {
          BN_free(inv_small_mod_large_mont);
          goto err;
        }
        rsa->inv_small_mod_large_mont = inv_small_mod_large_mont;
      }
    }
  }

  rsa->private_key_frozen = 1;
  ret = 1;

err:
  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  return ret;
}
CRYPTO_BUFFER *CRYPTO_BUFFER_new(const uint8_t *data, size_t len,
                                 CRYPTO_BUFFER_POOL *pool) {
  if (pool != NULL) {
    CRYPTO_BUFFER tmp;
    tmp.data = (uint8_t *)data;
    tmp.len = len;

    CRYPTO_MUTEX_lock_read(&pool->lock);
    CRYPTO_BUFFER *const duplicate =
        lh_CRYPTO_BUFFER_retrieve(pool->bufs, &tmp);
    if (duplicate != NULL) {
      CRYPTO_refcount_inc(&duplicate->references);
    }
    CRYPTO_MUTEX_unlock_read(&pool->lock);

    if (duplicate != NULL) {
      return duplicate;
    }
  }

  CRYPTO_BUFFER *const buf = OPENSSL_malloc(sizeof(CRYPTO_BUFFER));
  if (buf == NULL) {
    return NULL;
  }
  OPENSSL_memset(buf, 0, sizeof(CRYPTO_BUFFER));

  buf->data = BUF_memdup(data, len);
  if (len != 0 && buf->data == NULL) {
    OPENSSL_free(buf);
    return NULL;
  }

  buf->len = len;
  buf->references = 1;

  if (pool == NULL) {
    return buf;
  }

  buf->pool = pool;

  CRYPTO_MUTEX_lock_write(&pool->lock);
  CRYPTO_BUFFER *duplicate = lh_CRYPTO_BUFFER_retrieve(pool->bufs, buf);
  int inserted = 0;
  if (duplicate == NULL) {
    CRYPTO_BUFFER *old = NULL;
    inserted = lh_CRYPTO_BUFFER_insert(pool->bufs, &old, buf);
    assert(old == NULL);
  } else {
    CRYPTO_refcount_inc(&duplicate->references);
  }
  CRYPTO_MUTEX_unlock_write(&pool->lock);

  if (!inserted) {
    // We raced to insert |buf| into the pool and lost, or else there was an
    // error inserting.
    OPENSSL_free(buf->data);
    OPENSSL_free(buf);
    return duplicate;
  }

  return buf;
}
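// A caller-side sketch of the deduplication above (assumed usage, not taken
// from the library's tests): creating two buffers with equal contents through
// the same pool returns one shared, reference-counted object.
// |pool_dedup_example| is a hypothetical name used only for illustration.
#include <assert.h>

#include <openssl/pool.h>

static void pool_dedup_example(void) {
  static const uint8_t kData[] = {1, 2, 3, 4};

  CRYPTO_BUFFER_POOL *pool = CRYPTO_BUFFER_POOL_new();
  assert(pool != NULL);

  CRYPTO_BUFFER *a = CRYPTO_BUFFER_new(kData, sizeof(kData), pool);
  CRYPTO_BUFFER *b = CRYPTO_BUFFER_new(kData, sizeof(kData), pool);
  assert(a != NULL && b != NULL);

  // Equal contents through the same pool yield the same object; the second
  // call simply took another reference under the pool's lock.
  assert(a == b);

  CRYPTO_BUFFER_free(a);
  CRYPTO_BUFFER_free(b);
  CRYPTO_BUFFER_POOL_free(pool);  // the pool must be empty when freed
}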
// freeze_private_key finishes initializing |rsa|'s private key components.
// After this function has returned, |rsa| may not be changed. This is needed
// because |RSA| is a public struct and, additionally, OpenSSL 1.1.0 opaquified
// it wrong (see https://github.com/openssl/openssl/issues/5158).
static int freeze_private_key(RSA *rsa, BN_CTX *ctx) {
  CRYPTO_MUTEX_lock_read(&rsa->lock);
  int flags = rsa->flags;
  CRYPTO_MUTEX_unlock_read(&rsa->lock);
  if (flags & RSA_FLAG_PRIVATE_KEY_FROZEN) {
    return 1;
  }

  int ret = 0;
  CRYPTO_MUTEX_lock_write(&rsa->lock);
  if (rsa->flags & RSA_FLAG_PRIVATE_KEY_FROZEN) {
    ret = 1;
    goto err;
  }

  // |rsa->n| is public. Normalize the width.
  bn_set_minimal_width(rsa->n);
  if (rsa->mont_n == NULL) {
    rsa->mont_n = BN_MONT_CTX_new_for_modulus(rsa->n, ctx);
    if (rsa->mont_n == NULL) {
      goto err;
    }
  }

  // The only public upper-bound of |rsa->d| is the bit length of |rsa->n|.
  // The ASN.1 serialization of RSA private keys unfortunately leaks the byte
  // length of |rsa->d|, but normalize it so we only leak it once, rather than
  // per operation.
  if (rsa->d != NULL && !bn_resize_words(rsa->d, rsa->n->width)) {
    goto err;
  }

  if (rsa->p != NULL && rsa->q != NULL) {
    // |p| and |q| have public bit lengths.
    bn_set_minimal_width(rsa->p);
    bn_set_minimal_width(rsa->q);

    if (rsa->mont_p == NULL) {
      rsa->mont_p = BN_MONT_CTX_new_for_modulus(rsa->p, ctx);
      if (rsa->mont_p == NULL) {
        goto err;
      }
    }

    if (rsa->mont_q == NULL) {
      rsa->mont_q = BN_MONT_CTX_new_for_modulus(rsa->q, ctx);
      if (rsa->mont_q == NULL) {
        goto err;
      }
    }

    if (rsa->dmp1 != NULL && rsa->dmq1 != NULL) {
      // Key generation relies on this function to compute |iqmp|.
      if (rsa->iqmp == NULL) {
        BIGNUM *iqmp = BN_new();
        if (iqmp == NULL ||
            !bn_mod_inverse_secret_prime(iqmp, rsa->q, rsa->p, ctx,
                                         rsa->mont_p)) {
          BN_free(iqmp);
          goto err;
        }
        rsa->iqmp = iqmp;
      }

      // CRT components are only publicly bounded by their corresponding
      // moduli's bit lengths.
      if (!bn_resize_words(rsa->dmp1, rsa->p->width) ||
          !bn_resize_words(rsa->dmq1, rsa->q->width) ||
          !bn_resize_words(rsa->iqmp, rsa->p->width)) {
        goto err;
      }

      // Compute |inv_small_mod_large_mont|. Note that it is always modulo the
      // larger prime, independent of what is stored in |rsa->iqmp|.
      if (rsa->inv_small_mod_large_mont == NULL) {
        BIGNUM *inv_small_mod_large_mont = BN_new();
        int ok;
        if (BN_cmp(rsa->p, rsa->q) < 0) {
          ok = inv_small_mod_large_mont != NULL &&
               bn_mod_inverse_secret_prime(inv_small_mod_large_mont, rsa->p,
                                           rsa->q, ctx, rsa->mont_q) &&
               BN_to_montgomery(inv_small_mod_large_mont,
                                inv_small_mod_large_mont, rsa->mont_q, ctx);
        } else {
          ok = inv_small_mod_large_mont != NULL &&
               BN_to_montgomery(inv_small_mod_large_mont, rsa->iqmp,
                                rsa->mont_p, ctx);
        }
        if (!ok) {
          BN_free(inv_small_mod_large_mont);
          goto err;
        }
        rsa->inv_small_mod_large_mont = inv_small_mod_large_mont;
      }
    }
  }

  rsa->flags |= RSA_FLAG_PRIVATE_KEY_FROZEN;
  ret = 1;

err:
  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  return ret;
}
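// The locking pattern above, distilled into a standalone sketch (assumed, not
// from the library): check a flag under the read lock for the fast path, then
// recheck it under the write lock before doing one-time setup. |LAZY_OBJ|,
// |lazy_obj_init_locked|, and |expensive_init| are hypothetical names used
// only for illustration; CRYPTO_MUTEX comes from BoringSSL's internal header.
typedef struct lazy_obj_st {
  CRYPTO_MUTEX lock;  // assumed initialized with CRYPTO_MUTEX_init
  int initialized;    // guarded by |lock|
  // ... lazily computed fields ...
} LAZY_OBJ;

// expensive_init stands in for the real one-time computation.
static int expensive_init(LAZY_OBJ *obj) {
  (void)obj;
  return 1;
}

static int lazy_obj_init_locked(LAZY_OBJ *obj) {
  // Fast path: most calls only ever take the read lock.
  CRYPTO_MUTEX_lock_read(&obj->lock);
  int done = obj->initialized;
  CRYPTO_MUTEX_unlock_read(&obj->lock);
  if (done) {
    return 1;
  }

  // Slow path: recheck under the write lock so only one thread initializes.
  int ret = 0;
  CRYPTO_MUTEX_lock_write(&obj->lock);
  if (obj->initialized) {
    ret = 1;
    goto out;
  }
  if (!expensive_init(obj)) {
    goto out;
  }
  obj->initialized = 1;
  ret = 1;

out:
  CRYPTO_MUTEX_unlock_write(&obj->lock);
  return ret;
}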
/* ssl_get_prev_session attempts to find an SSL_SESSION to be used to resume
 * this connection. It is only called by servers.
 *
 *   ctx: contains the early callback context, which is the result of a
 *       shallow parse of the ClientHello.
 *
 * Returns:
 *   -1: error
 *    0: no session was found
 *    1: a session was found and attached to |s->session|
 *    PENDING_SESSION: the external session callback has not yet produced a
 *        result and the lookup must be retried.
 *
 * Side effects:
 *   - If a session is found then s->session is pointed at it (after freeing
 *     an existing session if need be) and s->verify_result is set from the
 *     session.
 *   - Both for new and resumed sessions, s->tlsext_ticket_expected is set to
 *     1 if the server should issue a new session ticket (to 0 otherwise). */
int ssl_get_prev_session(SSL *s, const struct ssl_early_callback_ctx *ctx) {
  /* This is used only by servers. */
  SSL_SESSION *ret = NULL;
  int fatal = 0;
  int try_session_cache = 1;
  int r;

  if (ctx->session_id_len > SSL_MAX_SSL_SESSION_ID_LENGTH) {
    goto err;
  }

  if (ctx->session_id_len == 0) {
    try_session_cache = 0;
  }

  r = tls1_process_ticket(s, ctx, &ret); /* sets s->tlsext_ticket_expected */
  switch (r) {
    case -1: /* Error during processing */
      fatal = 1;
      goto err;

    case 0:  /* No ticket found */
    case 1:  /* Zero length ticket found */
      break; /* Ok to carry on processing session id. */

    case 2: /* Ticket found but not decrypted. */
    case 3: /* Ticket decrypted, *ret has been set. */
      try_session_cache = 0;
      break;

    default:
      abort();
  }

  if (try_session_cache && ret == NULL &&
      !(s->initial_ctx->session_cache_mode &
        SSL_SESS_CACHE_NO_INTERNAL_LOOKUP)) {
    SSL_SESSION data;
    data.ssl_version = s->version;
    data.session_id_length = ctx->session_id_len;
    if (ctx->session_id_len == 0) {
      return 0;
    }
    memcpy(data.session_id, ctx->session_id, ctx->session_id_len);

    CRYPTO_MUTEX_lock_read(&s->initial_ctx->lock);
    ret = lh_SSL_SESSION_retrieve(s->initial_ctx->sessions, &data);
    if (ret != NULL) {
      SSL_SESSION_up_ref(ret);
    }
    CRYPTO_MUTEX_unlock(&s->initial_ctx->lock);
  }

  if (try_session_cache && ret == NULL &&
      s->initial_ctx->get_session_cb != NULL) {
    int copy = 1;

    ret = s->initial_ctx->get_session_cb(s, (uint8_t *)ctx->session_id,
                                         ctx->session_id_len, &copy);
    if (ret != NULL) {
      if (ret == SSL_magic_pending_session_ptr()) {
        /* This is a magic value which indicates that the callback needs to
         * unwind the stack and figure out the session asynchronously. */
        return PENDING_SESSION;
      }

      /* Increment reference count now if the session callback asks us to do
       * so (note that if the session structures returned by the callback are
       * shared between threads, it must handle the reference count itself
       * [i.e. copy == 0], or things won't be thread-safe). */
      if (copy) {
        SSL_SESSION_up_ref(ret);
      }

      /* Add the externally cached session to the internal cache as well if
       * and only if we are supposed to. */
      if (!(s->initial_ctx->session_cache_mode &
            SSL_SESS_CACHE_NO_INTERNAL_STORE)) {
        /* The following should not return 1, otherwise, things are very
         * strange */
        SSL_CTX_add_session(s->initial_ctx, ret);
      }
    }
  }

  if (ret == NULL) {
    goto err;
  }

  /* Now ret is non-NULL and we own one of its reference counts. */

  if (ret->sid_ctx_length != s->sid_ctx_length ||
      memcmp(ret->sid_ctx, s->sid_ctx, ret->sid_ctx_length)) {
    /* We have the session requested by the client, but we don't want to use
     * it in this context. */
    goto err; /* treat like cache miss */
  }

  if ((s->verify_mode & SSL_VERIFY_PEER) && s->sid_ctx_length == 0) {
    /* We can't be sure if this session is being used out of context, which is
     * especially important for SSL_VERIFY_PEER. The application should have
     * used SSL[_CTX]_set_session_id_context.
     *
     * For this error case, we generate an error instead of treating the event
     * like a cache miss (otherwise it would be easy for applications to
     * effectively disable the session cache by accident without anyone
     * noticing). */
    OPENSSL_PUT_ERROR(SSL, ssl_get_prev_session,
                      SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED);
    fatal = 1;
    goto err;
  }

  if (ret->timeout < (long)(time(NULL) - ret->time)) {
    /* timeout */
    if (try_session_cache) {
      /* session was from the cache, so remove it */
      SSL_CTX_remove_session(s->initial_ctx, ret);
    }
    goto err;
  }

  SSL_SESSION_free(s->session);
  s->session = ret;
  s->verify_result = s->session->verify_result;
  return 1;

err:
  if (ret != NULL) {
    SSL_SESSION_free(ret);
    if (!try_session_cache) {
      /* The session was from a ticket, so we should issue a ticket for the
       * new session */
      s->tlsext_ticket_expected = 1;
    }
  }
  if (fatal) {
    return -1;
  }
  return 0;
}