static int remove_session_lock(SSL_CTX *ctx, SSL_SESSION *session, int lock) {
  int ret = 0;

  if (session != NULL && session->session_id_length != 0) {
    if (lock) {
      CRYPTO_MUTEX_lock_write(&ctx->lock);
    }
    SSL_SESSION *found_session = lh_SSL_SESSION_retrieve(ctx->sessions,
                                                         session);
    if (found_session == session) {
      ret = 1;
      found_session = lh_SSL_SESSION_delete(ctx->sessions, session);
      SSL_SESSION_list_remove(ctx, session);
    }

    if (lock) {
      CRYPTO_MUTEX_unlock(&ctx->lock);
    }

    if (ret) {
      found_session->not_resumable = 1;
      if (ctx->remove_session_cb != NULL) {
        ctx->remove_session_cb(ctx, found_session);
      }
      SSL_SESSION_free(found_session);
    }
  }

  return ret;
}
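The |lock| parameter lets a caller that already holds |ctx->lock| (such as the cache-eviction loop inside SSL_CTX_add_session below) pass 0 and skip re-acquiring it. A minimal sketch of a public wrapper that takes the lock itself; the one-line body is an assumption based on this snippet, modeled on BoringSSL's SSL_CTX_remove_session:

/* Sketch (assumption): the public entry point passes lock = 1 so that
 * remove_session_lock acquires ctx->lock; internal callers that already hold
 * the lock pass 0 instead. */
int SSL_CTX_remove_session(SSL_CTX *ctx, SSL_SESSION *session) {
  return remove_session_lock(ctx, session, 1);
}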
Example #2
int BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, CRYPTO_MUTEX *lock,
                           const BIGNUM *mod, BN_CTX *bn_ctx) {
  CRYPTO_MUTEX_lock_read(lock);
  BN_MONT_CTX *ctx = *pmont;
  CRYPTO_MUTEX_unlock_read(lock);

  if (ctx) {
    return 1;
  }

  CRYPTO_MUTEX_lock_write(lock);
  ctx = *pmont;
  if (ctx) {
    goto out;
  }

  ctx = BN_MONT_CTX_new();
  if (ctx == NULL) {
    goto out;
  }
  if (!BN_MONT_CTX_set(ctx, mod, bn_ctx)) {
    BN_MONT_CTX_free(ctx);
    ctx = NULL;
    goto out;
  }
  *pmont = ctx;

out:
  CRYPTO_MUTEX_unlock_write(lock);
  return ctx != NULL;
}
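This is the double-checked locking idiom: a cheap read-locked check covers the common already-initialized case, and the write-locked re-check makes the one-time initialization race-free. A generic sketch of the same shape, with hypothetical struct and helper names (only the CRYPTO_MUTEX_* calls mirror the real API used above):

/* Hypothetical example type; not a BoringSSL structure. */
struct lazy_thing {
  CRYPTO_MUTEX lock;
  void *field;
};

static int lazy_thing_init_field(struct lazy_thing *t, void *(*make)(void)) {
  /* Fast path: check under a shared (read) lock. */
  CRYPTO_MUTEX_lock_read(&t->lock);
  int done = t->field != NULL;
  CRYPTO_MUTEX_unlock_read(&t->lock);
  if (done) {
    return 1;
  }

  /* Slow path: re-check under the exclusive (write) lock before creating. */
  CRYPTO_MUTEX_lock_write(&t->lock);
  if (t->field == NULL) {
    t->field = make();
  }
  int ret = t->field != NULL;
  CRYPTO_MUTEX_unlock_write(&t->lock);
  return ret;
}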
Example #3
int X509_STORE_add_crl(X509_STORE *ctx, X509_CRL *x)
{
    X509_OBJECT *obj;
    int ret = 1;

    if (x == NULL)
        return 0;
    obj = (X509_OBJECT *)OPENSSL_malloc(sizeof(X509_OBJECT));
    if (obj == NULL) {
        OPENSSL_PUT_ERROR(X509, ERR_R_MALLOC_FAILURE);
        return 0;
    }
    obj->type = X509_LU_CRL;
    obj->data.crl = x;

    CRYPTO_MUTEX_lock_write(&ctx->objs_lock);

    X509_OBJECT_up_ref_count(obj);

    if (X509_OBJECT_retrieve_match(ctx->objs, obj)) {
        X509_OBJECT_free_contents(obj);
        OPENSSL_free(obj);
        OPENSSL_PUT_ERROR(X509, X509_R_CERT_ALREADY_IN_HASH_TABLE);
        ret = 0;
    } else if (!sk_X509_OBJECT_push(ctx->objs, obj)) {
        X509_OBJECT_free_contents(obj);
        OPENSSL_free(obj);
        OPENSSL_PUT_ERROR(X509, ERR_R_MALLOC_FAILURE);
        ret = 0;
    }

    CRYPTO_MUTEX_unlock_write(&ctx->objs_lock);

    return ret;
}
Example #4
int X509_STORE_get_by_subject(X509_STORE_CTX *vs, int type, X509_NAME *name,
                              X509_OBJECT *ret)
{
    X509_STORE *ctx = vs->ctx;
    X509_LOOKUP *lu;
    X509_OBJECT stmp, *tmp;
    int i;

    CRYPTO_MUTEX_lock_write(&ctx->objs_lock);
    tmp = X509_OBJECT_retrieve_by_subject(ctx->objs, type, name);
    CRYPTO_MUTEX_unlock_write(&ctx->objs_lock);

    if (tmp == NULL || type == X509_LU_CRL) {
        for (i = 0; i < (int)sk_X509_LOOKUP_num(ctx->get_cert_methods); i++) {
            lu = sk_X509_LOOKUP_value(ctx->get_cert_methods, i);
            if (X509_LOOKUP_by_subject(lu, type, name, &stmp)) {
                tmp = &stmp;
                break;
            }
        }
        if (tmp == NULL)
            return 0;
    }

    /*
     * if (ret->data.ptr != NULL) X509_OBJECT_free_contents(ret);
     */

    ret->type = tmp->type;
    ret->data.ptr = tmp->data.ptr;

    X509_OBJECT_up_ref_count(ret);

    return 1;
}
Example #5
static int remove_session_lock(SSL_CTX *ctx, SSL_SESSION *c, int lock) {
  SSL_SESSION *r;
  int ret = 0;

  if (c != NULL && c->session_id_length != 0) {
    if (lock) {
      CRYPTO_MUTEX_lock_write(&ctx->lock);
    }
    r = lh_SSL_SESSION_retrieve(ctx->sessions, c);
    if (r == c) {
      ret = 1;
      r = lh_SSL_SESSION_delete(ctx->sessions, c);
      SSL_SESSION_list_remove(ctx, c);
    }

    if (lock) {
      CRYPTO_MUTEX_unlock(&ctx->lock);
    }

    if (ret) {
      r->not_resumable = 1;
      if (ctx->remove_session_cb != NULL) {
        ctx->remove_session_cb(ctx, r);
      }
      SSL_SESSION_free(r);
    }
  }

  return ret;
}
Example #6
int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *c) {
  int ret = 0;
  SSL_SESSION *s;

  /* Add just one reference for the SSL_CTX's session cache, even though the
   * cache refers to each session in two ways: a doubly linked list and an
   * lhash. */
  SSL_SESSION_up_ref(c);
  /* if session c is already in the cache, we take back the increment later */

  CRYPTO_MUTEX_lock_write(&ctx->lock);
  if (!lh_SSL_SESSION_insert(ctx->sessions, &s, c)) {
    CRYPTO_MUTEX_unlock(&ctx->lock);
    return 0;
  }

  /* s != NULL iff we already had a session with the given session ID. In this
   * case, s == c should hold (then we did not really modify ctx->sessions), or
   * we're in trouble. */
  if (s != NULL && s != c) {
    /* We *are* in trouble ... */
    SSL_SESSION_list_remove(ctx, s);
    SSL_SESSION_free(s);
    /* ... so pretend the other session did not exist in cache (we cannot
     * handle two SSL_SESSION structures with identical session ID in the same
     * cache, which could happen e.g. when two threads concurrently obtain the
     * same session from an external cache) */
    s = NULL;
  }

  /* Put at the head of the queue unless it is already in the cache */
  if (s == NULL) {
    SSL_SESSION_list_add(ctx, c);
  }

  if (s != NULL) {
    /* existing cache entry -- decrement previously incremented reference count
     * because it already takes into account the cache */
    SSL_SESSION_free(s); /* s == c */
    ret = 0;
  } else {
    /* new cache entry -- remove old ones if cache has become too large */
    ret = 1;

    if (SSL_CTX_sess_get_cache_size(ctx) > 0) {
      while (SSL_CTX_sess_number(ctx) > SSL_CTX_sess_get_cache_size(ctx)) {
        if (!remove_session_lock(ctx, ctx->session_cache_tail, 0)) {
          break;
        }
      }
    }
  }

  CRYPTO_MUTEX_unlock(&ctx->lock);
  return ret;
}
Example #7
/* rsa_blinding_release marks the cached BN_BLINDING at the given index as free
 * for other threads to use. */
static void rsa_blinding_release(RSA *rsa, BN_BLINDING *blinding,
                                 unsigned blinding_index) {
  if (blinding_index == MAX_BLINDINGS_PER_RSA) {
    /* This blinding wasn't cached. */
    BN_BLINDING_free(blinding);
    return;
  }

  CRYPTO_MUTEX_lock_write(&rsa->lock);
  rsa->blindings_inuse[blinding_index] = 0;
  CRYPTO_MUTEX_unlock_write(&rsa->lock);
}
Example #8
void SSL_CTX_flush_sessions(SSL_CTX *ctx, long time) {
  TIMEOUT_PARAM tp;

  tp.ctx = ctx;
  tp.cache = ctx->sessions;
  if (tp.cache == NULL) {
    return;
  }
  tp.time = time;
  CRYPTO_MUTEX_lock_write(&ctx->lock);
  lh_SSL_SESSION_doall_arg(tp.cache, timeout_doall_arg, &tp);
  CRYPTO_MUTEX_unlock(&ctx->lock);
}
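A typical caller passes the current wall-clock time, so every cached session whose timeout has elapsed by now is removed; a hedged usage sketch:

/* Usage sketch: expire all sessions whose timeout has passed as of the
 * current time (time() requires <time.h>). */
SSL_CTX_flush_sessions(ctx, (long)time(NULL));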
Example #9
void CRYPTO_BUFFER_POOL_free(CRYPTO_BUFFER_POOL *pool) {
  if (pool == NULL) {
    return;
  }

#if !defined(NDEBUG)
  CRYPTO_MUTEX_lock_write(&pool->lock);
  assert(lh_CRYPTO_BUFFER_num_items(pool->bufs) == 0);
  CRYPTO_MUTEX_unlock_write(&pool->lock);
#endif

  lh_CRYPTO_BUFFER_free(pool->bufs);
  CRYPTO_MUTEX_cleanup(&pool->lock);
  OPENSSL_free(pool);
}
Example #10
int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *session) {
  /* Although |session| is inserted into two structures (a doubly-linked list
   * and the hash table), |ctx| only takes one reference. */
  SSL_SESSION_up_ref(session);

  SSL_SESSION *old_session;
  CRYPTO_MUTEX_lock_write(&ctx->lock);
  if (!lh_SSL_SESSION_insert(ctx->sessions, &old_session, session)) {
    CRYPTO_MUTEX_unlock(&ctx->lock);
    SSL_SESSION_free(session);
    return 0;
  }

  if (old_session != NULL) {
    if (old_session == session) {
      /* |session| was already in the cache. */
      CRYPTO_MUTEX_unlock(&ctx->lock);
      SSL_SESSION_free(old_session);
      return 0;
    }

    /* There was a session ID collision. |old_session| must be removed from
     * the linked list and released. */
    SSL_SESSION_list_remove(ctx, old_session);
    SSL_SESSION_free(old_session);
  }

  SSL_SESSION_list_add(ctx, session);

  /* Enforce any cache size limits. */
  if (SSL_CTX_sess_get_cache_size(ctx) > 0) {
    while (SSL_CTX_sess_number(ctx) > SSL_CTX_sess_get_cache_size(ctx)) {
      if (!remove_session_lock(ctx, ctx->session_cache_tail, 0)) {
        break;
      }
    }
  }

  CRYPTO_MUTEX_unlock(&ctx->lock);
  return 1;
}
Example #11
void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf) {
  if (buf == NULL) {
    return;
  }

  CRYPTO_BUFFER_POOL *const pool = buf->pool;
  if (pool == NULL) {
    if (CRYPTO_refcount_dec_and_test_zero(&buf->references)) {
      // If a reference count of zero is observed, there cannot be a reference
      // from any pool to this buffer and thus we are able to free this
      // buffer.
      OPENSSL_free(buf->data);
      OPENSSL_free(buf);
    }

    return;
  }

  CRYPTO_MUTEX_lock_write(&pool->lock);
  if (!CRYPTO_refcount_dec_and_test_zero(&buf->references)) {
    CRYPTO_MUTEX_unlock_write(&buf->pool->lock);
    return;
  }

  // We have an exclusive lock on the pool, therefore no concurrent lookups can
  // find this buffer and increment the reference count. Thus, if the count is
  // zero, there are no other references, there never can be any more, and we
  // can free this buffer.
  void *found = lh_CRYPTO_BUFFER_delete(pool->bufs, buf);
  assert(found != NULL);
  assert(found == buf);
  (void)found;
  CRYPTO_MUTEX_unlock_write(&buf->pool->lock);
  OPENSSL_free(buf->data);
  OPENSSL_free(buf);
}
Example #12
/* rsa_blinding_get returns a BN_BLINDING to use with |rsa|. It does this by
 * allocating one of the cached BN_BLINDING objects in |rsa->blindings|. If
 * none are free, the cache will be extended by an extra element and the new
 * BN_BLINDING is returned.
 *
 * On success, the index of the assigned BN_BLINDING is written to
 * |*index_used| and must be passed to |rsa_blinding_release| when finished. */
static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used,
                                     BN_CTX *ctx) {
  assert(ctx != NULL);
  assert(rsa->mont_n != NULL);

  BN_BLINDING *ret = NULL;
  BN_BLINDING **new_blindings;
  uint8_t *new_blindings_inuse;
  char overflow = 0;

  CRYPTO_MUTEX_lock_write(&rsa->lock);

  unsigned i;
  for (i = 0; i < rsa->num_blindings; i++) {
    if (rsa->blindings_inuse[i] == 0) {
      rsa->blindings_inuse[i] = 1;
      ret = rsa->blindings[i];
      *index_used = i;
      break;
    }
  }

  if (ret != NULL) {
    CRYPTO_MUTEX_unlock_write(&rsa->lock);
    return ret;
  }

  overflow = rsa->num_blindings >= MAX_BLINDINGS_PER_RSA;

  /* We didn't find a free BN_BLINDING to use, so increase the length of the
   * arrays by one and use the newly created element. */

  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  ret = BN_BLINDING_new();
  if (ret == NULL) {
    return NULL;
  }

  if (overflow) {
    /* We cannot add any more cached BN_BLINDINGs so we use |ret|
     * and mark it for destruction in |rsa_blinding_release|. */
    *index_used = MAX_BLINDINGS_PER_RSA;
    return ret;
  }

  CRYPTO_MUTEX_lock_write(&rsa->lock);

  new_blindings =
      OPENSSL_malloc(sizeof(BN_BLINDING *) * (rsa->num_blindings + 1));
  if (new_blindings == NULL) {
    goto err1;
  }
  memcpy(new_blindings, rsa->blindings,
         sizeof(BN_BLINDING *) * rsa->num_blindings);
  new_blindings[rsa->num_blindings] = ret;

  new_blindings_inuse = OPENSSL_malloc(rsa->num_blindings + 1);
  if (new_blindings_inuse == NULL) {
    goto err2;
  }
  memcpy(new_blindings_inuse, rsa->blindings_inuse, rsa->num_blindings);
  new_blindings_inuse[rsa->num_blindings] = 1;
  *index_used = rsa->num_blindings;

  OPENSSL_free(rsa->blindings);
  rsa->blindings = new_blindings;
  OPENSSL_free(rsa->blindings_inuse);
  rsa->blindings_inuse = new_blindings_inuse;
  rsa->num_blindings++;

  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  return ret;

err2:
  OPENSSL_free(new_blindings);

err1:
  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  BN_BLINDING_free(ret);
  return NULL;
}
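A caller is expected to pair the two helpers: acquire a blinding, use it for a single private-key operation, then release it (rsa_blinding_release frees it when the MAX_BLINDINGS_PER_RSA sentinel index says it was never cached). A hedged sketch of that pairing, with the blinding arithmetic itself elided:

/* Sketch of the expected acquire/use/release pairing; error paths must still
 * reach rsa_blinding_release so the cache slot is returned. */
unsigned blinding_index;
BN_BLINDING *blinding = rsa_blinding_get(rsa, &blinding_index, ctx);
if (blinding == NULL) {
  goto err;
}
/* ... blind the input, run the RSA private-key operation, unblind ... */
rsa_blinding_release(rsa, blinding, blinding_index);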
Example #13
// freeze_private_key finishes initializing |rsa|'s private key components.
// After this function has returned, |rsa| may not be changed. This is needed
// because |RSA| is a public struct and, additionally, OpenSSL 1.1.0 opaquified
// it wrong (see https://github.com/openssl/openssl/issues/5158).
static int freeze_private_key(RSA *rsa, BN_CTX *ctx) {
  CRYPTO_MUTEX_lock_read(&rsa->lock);
  int frozen = rsa->private_key_frozen;
  CRYPTO_MUTEX_unlock_read(&rsa->lock);
  if (frozen) {
    return 1;
  }

  int ret = 0;
  CRYPTO_MUTEX_lock_write(&rsa->lock);
  if (rsa->private_key_frozen) {
    ret = 1;
    goto err;
  }

  // Pre-compute various intermediate values, as well as copies of private
  // exponents with correct widths. Note that other threads may concurrently
  // read from |rsa->n|, |rsa->e|, etc., so any fixes must be in separate
  // copies. We use |mont_n->N|, |mont_p->N|, and |mont_q->N| as copies of |n|,
  // |p|, and |q| with the correct minimal widths.

  if (rsa->mont_n == NULL) {
    rsa->mont_n = BN_MONT_CTX_new_for_modulus(rsa->n, ctx);
    if (rsa->mont_n == NULL) {
      goto err;
    }
  }
  const BIGNUM *n_fixed = &rsa->mont_n->N;

  // The only public upper-bound of |rsa->d| is the bit length of |rsa->n|. The
  // ASN.1 serialization of RSA private keys unfortunately leaks the byte length
  // of |rsa->d|, but normalize it so we only leak it once, rather than per
  // operation.
  if (rsa->d != NULL &&
      !ensure_fixed_copy(&rsa->d_fixed, rsa->d, n_fixed->width)) {
    goto err;
  }

  if (rsa->p != NULL && rsa->q != NULL) {
    if (rsa->mont_p == NULL) {
      rsa->mont_p = BN_MONT_CTX_new_for_modulus(rsa->p, ctx);
      if (rsa->mont_p == NULL) {
        goto err;
      }
    }
    const BIGNUM *p_fixed = &rsa->mont_p->N;

    if (rsa->mont_q == NULL) {
      rsa->mont_q = BN_MONT_CTX_new_for_modulus(rsa->q, ctx);
      if (rsa->mont_q == NULL) {
        goto err;
      }
    }
    const BIGNUM *q_fixed = &rsa->mont_q->N;

    if (rsa->dmp1 != NULL && rsa->dmq1 != NULL) {
      // Key generation relies on this function to compute |iqmp|.
      if (rsa->iqmp == NULL) {
        BIGNUM *iqmp = BN_new();
        if (iqmp == NULL ||
            !bn_mod_inverse_secret_prime(iqmp, rsa->q, rsa->p, ctx,
                                         rsa->mont_p)) {
          BN_free(iqmp);
          goto err;
        }
        rsa->iqmp = iqmp;
      }

      // CRT components are only publicly bounded by their corresponding
      // moduli's bit lengths. |rsa->iqmp| is unused outside of this one-time
      // setup, so we do not compute a fixed-width version of it.
      if (!ensure_fixed_copy(&rsa->dmp1_fixed, rsa->dmp1, p_fixed->width) ||
          !ensure_fixed_copy(&rsa->dmq1_fixed, rsa->dmq1, q_fixed->width)) {
        goto err;
      }

      // Compute |inv_small_mod_large_mont|. Note that it is always modulo the
      // larger prime, independent of what is stored in |rsa->iqmp|.
      if (rsa->inv_small_mod_large_mont == NULL) {
        BIGNUM *inv_small_mod_large_mont = BN_new();
        int ok;
        if (BN_cmp(rsa->p, rsa->q) < 0) {
          ok = inv_small_mod_large_mont != NULL &&
               bn_mod_inverse_secret_prime(inv_small_mod_large_mont, rsa->p,
                                           rsa->q, ctx, rsa->mont_q) &&
               BN_to_montgomery(inv_small_mod_large_mont,
                                inv_small_mod_large_mont, rsa->mont_q, ctx);
        } else {
          ok = inv_small_mod_large_mont != NULL &&
               BN_to_montgomery(inv_small_mod_large_mont, rsa->iqmp,
                                rsa->mont_p, ctx);
        }
        if (!ok) {
          BN_free(inv_small_mod_large_mont);
          goto err;
        }
        rsa->inv_small_mod_large_mont = inv_small_mod_large_mont;
      }
    }
  }

  rsa->private_key_frozen = 1;
  ret = 1;

err:
  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  return ret;
}
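This snippet relies on an |ensure_fixed_copy| helper that is not shown. A hedged sketch of what it would need to do, inferred from its call sites here (make a one-time copy of |in| in |*out|, widened to |width| words); this is an approximation, not the library's exact code:

/* Hedged sketch of the |ensure_fixed_copy| helper used above, inferred from
 * how it is called: allocate a copy of |in| once and resize it to |width|
 * words, leaving |*out| untouched if it is already set. */
static int ensure_fixed_copy(BIGNUM **out, const BIGNUM *in, int width) {
  if (*out != NULL) {
    return 1;  /* already computed */
  }
  BIGNUM *copy = BN_dup(in);
  if (copy == NULL || !bn_resize_words(copy, width)) {
    BN_free(copy);
    return 0;
  }
  *out = copy;
  return 1;
}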
Example #14
CRYPTO_BUFFER *CRYPTO_BUFFER_new(const uint8_t *data, size_t len,
                                 CRYPTO_BUFFER_POOL *pool) {
  if (pool != NULL) {
    CRYPTO_BUFFER tmp;
    tmp.data = (uint8_t *) data;
    tmp.len = len;

    CRYPTO_MUTEX_lock_read(&pool->lock);
    CRYPTO_BUFFER *const duplicate =
        lh_CRYPTO_BUFFER_retrieve(pool->bufs, &tmp);
    if (duplicate != NULL) {
      CRYPTO_refcount_inc(&duplicate->references);
    }
    CRYPTO_MUTEX_unlock_read(&pool->lock);

    if (duplicate != NULL) {
      return duplicate;
    }
  }

  CRYPTO_BUFFER *const buf = OPENSSL_malloc(sizeof(CRYPTO_BUFFER));
  if (buf == NULL) {
    return NULL;
  }
  OPENSSL_memset(buf, 0, sizeof(CRYPTO_BUFFER));

  buf->data = BUF_memdup(data, len);
  if (len != 0 && buf->data == NULL) {
    OPENSSL_free(buf);
    return NULL;
  }

  buf->len = len;
  buf->references = 1;

  if (pool == NULL) {
    return buf;
  }

  buf->pool = pool;

  CRYPTO_MUTEX_lock_write(&pool->lock);
  CRYPTO_BUFFER *duplicate = lh_CRYPTO_BUFFER_retrieve(pool->bufs, buf);
  int inserted = 0;
  if (duplicate == NULL) {
    CRYPTO_BUFFER *old = NULL;
    inserted = lh_CRYPTO_BUFFER_insert(pool->bufs, &old, buf);
    assert(old == NULL);
  } else {
    CRYPTO_refcount_inc(&duplicate->references);
  }
  CRYPTO_MUTEX_unlock_write(&pool->lock);

  if (!inserted) {
    // We raced to insert |buf| into the pool and lost, or else there was an
    // error inserting.
    OPENSSL_free(buf->data);
    OPENSSL_free(buf);
    return duplicate;
  }

  return buf;
}
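Because the pool deduplicates by content, creating the same bytes twice from one pool yields the same CRYPTO_BUFFER with its reference count bumped, and every buffer must be released before the pool itself (see the empty-pool assert in CRYPTO_BUFFER_POOL_free above). A usage sketch against the public pool API (<openssl/pool.h>, plus <assert.h>; error handling omitted):

/* Usage sketch: identical contents from the same pool dedup to one buffer;
 * release all buffers before freeing the pool. */
static const uint8_t kData[] = {1, 2, 3};
CRYPTO_BUFFER_POOL *pool = CRYPTO_BUFFER_POOL_new();
CRYPTO_BUFFER *a = CRYPTO_BUFFER_new(kData, sizeof(kData), pool);
CRYPTO_BUFFER *b = CRYPTO_BUFFER_new(kData, sizeof(kData), pool);
assert(a == b);  /* second call found the duplicate and incremented its count */
CRYPTO_BUFFER_free(b);
CRYPTO_BUFFER_free(a);
CRYPTO_BUFFER_POOL_free(pool);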
Example #15
// freeze_private_key finishes initializing |rsa|'s private key components.
// After this function has returned, |rsa| may not be changed. This is needed
// because |RSA| is a public struct and, additionally, OpenSSL 1.1.0 opaquified
// it wrong (see https://github.com/openssl/openssl/issues/5158).
static int freeze_private_key(RSA *rsa, BN_CTX *ctx) {
  CRYPTO_MUTEX_lock_read(&rsa->lock);
  int flags = rsa->flags;
  CRYPTO_MUTEX_unlock_read(&rsa->lock);
  if (flags & RSA_FLAG_PRIVATE_KEY_FROZEN) {
    return 1;
  }

  int ret = 0;
  CRYPTO_MUTEX_lock_write(&rsa->lock);
  if (rsa->flags & RSA_FLAG_PRIVATE_KEY_FROZEN) {
    ret = 1;
    goto err;
  }

  // |rsa->n| is public. Normalize the width.
  bn_set_minimal_width(rsa->n);
  if (rsa->mont_n == NULL) {
    rsa->mont_n = BN_MONT_CTX_new_for_modulus(rsa->n, ctx);
    if (rsa->mont_n == NULL) {
      goto err;
    }
  }

  // The only public upper-bound of |rsa->d| is the bit length of |rsa->n|. The
  // ASN.1 serialization of RSA private keys unfortunately leaks the byte length
  // of |rsa->d|, but normalize it so we only leak it once, rather than per
  // operation.
  if (rsa->d != NULL &&
      !bn_resize_words(rsa->d, rsa->n->width)) {
    goto err;
  }

  if (rsa->p != NULL && rsa->q != NULL) {
    // |p| and |q| have public bit lengths.
    bn_set_minimal_width(rsa->p);
    bn_set_minimal_width(rsa->q);

    if (rsa->mont_p == NULL) {
      rsa->mont_p = BN_MONT_CTX_new_for_modulus(rsa->p, ctx);
      if (rsa->mont_p == NULL) {
        goto err;
      }
    }

    if (rsa->mont_q == NULL) {
      rsa->mont_q = BN_MONT_CTX_new_for_modulus(rsa->q, ctx);
      if (rsa->mont_q == NULL) {
        goto err;
      }
    }

    if (rsa->dmp1 != NULL && rsa->dmq1 != NULL) {
      // Key generation relies on this function to compute |iqmp|.
      if (rsa->iqmp == NULL) {
        BIGNUM *iqmp = BN_new();
        if (iqmp == NULL ||
            !bn_mod_inverse_secret_prime(iqmp, rsa->q, rsa->p, ctx,
                                         rsa->mont_p)) {
          BN_free(iqmp);
          goto err;
        }
        rsa->iqmp = iqmp;
      }

      // CRT components are only publicly bounded by their corresponding
      // moduli's bit lengths.
      if (!bn_resize_words(rsa->dmp1, rsa->p->width) ||
          !bn_resize_words(rsa->dmq1, rsa->q->width) ||
          !bn_resize_words(rsa->iqmp, rsa->p->width)) {
        goto err;
      }

      // Compute |inv_small_mod_large_mont|. Note that it is always modulo the
      // larger prime, independent of what is stored in |rsa->iqmp|.
      if (rsa->inv_small_mod_large_mont == NULL) {
        BIGNUM *inv_small_mod_large_mont = BN_new();
        int ok;
        if (BN_cmp(rsa->p, rsa->q) < 0) {
          ok = inv_small_mod_large_mont != NULL &&
               bn_mod_inverse_secret_prime(inv_small_mod_large_mont, rsa->p,
                                           rsa->q, ctx, rsa->mont_q) &&
               BN_to_montgomery(inv_small_mod_large_mont,
                                inv_small_mod_large_mont, rsa->mont_q, ctx);
        } else {
          ok = inv_small_mod_large_mont != NULL &&
               BN_to_montgomery(inv_small_mod_large_mont, rsa->iqmp,
                                rsa->mont_p, ctx);
        }
        if (!ok) {
          BN_free(inv_small_mod_large_mont);
          goto err;
        }
        rsa->inv_small_mod_large_mont = inv_small_mod_large_mont;
      }
    }
  }

  rsa->flags |= RSA_FLAG_PRIVATE_KEY_FROZEN;
  ret = 1;

err:
  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  return ret;
}