// ensure_fixed_copy lazily populates |*out| with a copy of |in| padded to
// |width| words. If |*out| is already set, it is left untouched. Returns one
// on success and zero on allocation or resize failure, in which case |*out|
// is unmodified.
static int ensure_fixed_copy(BIGNUM **out, const BIGNUM *in, int width) {
  if (*out == NULL) {
    BIGNUM *dup = BN_dup(in);
    if (dup == NULL) {
      return 0;
    }
    if (!bn_resize_words(dup, width)) {
      BN_free(dup);
      return 0;
    }
    *out = dup;
  }
  return 1;
}
// mod_exp computes |r0| = |I|^d (mod n) for the private key in |rsa|, using
// the CRT decomposition (|dmp1|, |dmq1|, |iqmp|) in constant time: the result
// is assembled from exponentiations modulo each prime via Garner's
// recombination. The caller must ensure |I| < |n|. Returns one on success and
// zero on error.
static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) {
  // All CRT components must be present; callers without them take a
  // different code path.
  assert(ctx != NULL);
  assert(rsa->n != NULL);
  assert(rsa->e != NULL);
  assert(rsa->d != NULL);
  assert(rsa->p != NULL);
  assert(rsa->q != NULL);
  assert(rsa->dmp1 != NULL);
  assert(rsa->dmq1 != NULL);
  assert(rsa->iqmp != NULL);

  BIGNUM *r1, *m1;
  int ret = 0;
  BN_CTX_start(ctx);
  r1 = BN_CTX_get(ctx);
  m1 = BN_CTX_get(ctx);
  if (r1 == NULL || m1 == NULL) {
    goto err;
  }

  // Ensure the fixed-width copies and Montgomery contexts used below exist.
  if (!freeze_private_key(rsa, ctx)) {
    goto err;
  }

  // Implementing RSA with CRT in constant-time is sensitive to which prime is
  // larger. Canonicalize fields so that |p| is the larger prime.
  const BIGNUM *dmp1 = rsa->dmp1_fixed, *dmq1 = rsa->dmq1_fixed;
  const BN_MONT_CTX *mont_p = rsa->mont_p, *mont_q = rsa->mont_q;
  if (BN_cmp(rsa->p, rsa->q) < 0) {
    mont_p = rsa->mont_q;
    mont_q = rsa->mont_p;
    dmp1 = rsa->dmq1_fixed;
    dmq1 = rsa->dmp1_fixed;
  }

  // Use the minimal-width versions of |n|, |p|, and |q|. Either works, but if
  // someone gives us non-minimal values, these will be slightly more efficient
  // on the non-Montgomery operations.
  const BIGNUM *n = &rsa->mont_n->N;
  const BIGNUM *p = &mont_p->N;
  const BIGNUM *q = &mont_q->N;

  // This is a pre-condition for |mod_montgomery|. It was already checked by the
  // caller.
  assert(BN_ucmp(I, n) < 0);

  if (  // |m1| is the result modulo |q|.
      !mod_montgomery(r1, I, q, mont_q, p, ctx) ||
      !BN_mod_exp_mont_consttime(m1, r1, dmq1, q, ctx, mont_q) ||

      // |r0| is the result modulo |p|.
      !mod_montgomery(r1, I, p, mont_p, q, ctx) ||
      !BN_mod_exp_mont_consttime(r0, r1, dmp1, p, ctx, mont_p) ||

      // Compute r0 = r0 - m1 mod p. |p| is the larger prime, so |m1| is already
      // fully reduced mod |p|.
      !bn_mod_sub_consttime(r0, r0, m1, p, ctx) ||

      // r0 = r0 * iqmp mod p. We use Montgomery multiplication to compute this
      // in constant time. |inv_small_mod_large_mont| is in Montgomery form and
      // r0 is not, so the result is taken out of Montgomery form.
      !BN_mod_mul_montgomery(r0, r0, rsa->inv_small_mod_large_mont, mont_p,
                             ctx) ||

      // r0 = r0 * q + m1 gives the final result. Reducing modulo q gives m1, so
      // it is correct mod p. Reducing modulo p gives (r0-m1)*iqmp*q + m1 = r0,
      // so it is correct mod q. Finally, the result is bounded by [m1, n + m1),
      // and the result is at least |m1|, so this must be the unique answer in
      // [0, n).
      !bn_mul_consttime(r0, r0, q, ctx) ||
      !bn_uadd_consttime(r0, r0, m1) ||

      // The result should be bounded by |n|, but fixed-width operations may
      // bound the width slightly higher, so fix it.
      !bn_resize_words(r0, n->width)) {
    goto err;
  }

  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}
// freeze_private_key finishes initializing |rsa|'s private key components.
// After this function has returned, |rsa| may not be changed. This is needed
// because |RSA| is a public struct and, additionally, OpenSSL 1.1.0 opaquified
// it wrong (see https://github.com/openssl/openssl/issues/5158).
//
// On success, |rsa|'s Montgomery contexts, fixed-width CRT exponents, |iqmp|
// (computed if absent), and |inv_small_mod_large_mont| are all populated and
// RSA_FLAG_PRIVATE_KEY_FROZEN is set. Returns one on success, zero on error.
static int freeze_private_key(RSA *rsa, BN_CTX *ctx) {
  // Fast path: if the key is already frozen, only a read lock is needed.
  CRYPTO_MUTEX_lock_read(&rsa->lock);
  int flags = rsa->flags;
  CRYPTO_MUTEX_unlock_read(&rsa->lock);
  if (flags & RSA_FLAG_PRIVATE_KEY_FROZEN) {
    return 1;
  }

  int ret = 0;
  CRYPTO_MUTEX_lock_write(&rsa->lock);
  // Re-check under the write lock: another thread may have frozen the key
  // between releasing the read lock and acquiring the write lock.
  if (rsa->flags & RSA_FLAG_PRIVATE_KEY_FROZEN) {
    ret = 1;
    goto err;
  }

  // |rsa->n| is public. Normalize the width.
  bn_set_minimal_width(rsa->n);
  if (rsa->mont_n == NULL) {
    rsa->mont_n = BN_MONT_CTX_new_for_modulus(rsa->n, ctx);
    if (rsa->mont_n == NULL) {
      goto err;
    }
  }

  // The only public upper-bound of |rsa->d| is the bit length of |rsa->n|. The
  // ASN.1 serialization of RSA private keys unfortunately leaks the byte length
  // of |rsa->d|, but normalize it so we only leak it once, rather than per
  // operation.
  if (rsa->d != NULL && !bn_resize_words(rsa->d, rsa->n->width)) {
    goto err;
  }

  if (rsa->p != NULL && rsa->q != NULL) {
    // |p| and |q| have public bit lengths.
    bn_set_minimal_width(rsa->p);
    bn_set_minimal_width(rsa->q);

    if (rsa->mont_p == NULL) {
      rsa->mont_p = BN_MONT_CTX_new_for_modulus(rsa->p, ctx);
      if (rsa->mont_p == NULL) {
        goto err;
      }
    }
    if (rsa->mont_q == NULL) {
      rsa->mont_q = BN_MONT_CTX_new_for_modulus(rsa->q, ctx);
      if (rsa->mont_q == NULL) {
        goto err;
      }
    }

    if (rsa->dmp1 != NULL && rsa->dmq1 != NULL) {
      // Key generation relies on this function to compute |iqmp|.
      if (rsa->iqmp == NULL) {
        BIGNUM *iqmp = BN_new();
        if (iqmp == NULL ||
            !bn_mod_inverse_secret_prime(iqmp, rsa->q, rsa->p, ctx,
                                         rsa->mont_p)) {
          BN_free(iqmp);
          goto err;
        }
        rsa->iqmp = iqmp;
      }

      // CRT components are only publicly bounded by their corresponding
      // moduli's bit lengths.
      if (!bn_resize_words(rsa->dmp1, rsa->p->width) ||
          !bn_resize_words(rsa->dmq1, rsa->q->width) ||
          !bn_resize_words(rsa->iqmp, rsa->p->width)) {
        goto err;
      }

      // Compute |inv_small_mod_large_mont|. Note that it is always modulo the
      // larger prime, independent of what is stored in |rsa->iqmp|.
      if (rsa->inv_small_mod_large_mont == NULL) {
        BIGNUM *inv_small_mod_large_mont = BN_new();
        int ok;
        if (BN_cmp(rsa->p, rsa->q) < 0) {
          // |q| is the larger prime: |rsa->iqmp| (q^-1 mod p) is not the value
          // needed, so compute p^-1 mod q from scratch.
          ok = inv_small_mod_large_mont != NULL &&
               bn_mod_inverse_secret_prime(inv_small_mod_large_mont, rsa->p,
                                           rsa->q, ctx, rsa->mont_q) &&
               BN_to_montgomery(inv_small_mod_large_mont,
                                inv_small_mod_large_mont, rsa->mont_q, ctx);
        } else {
          // |p| is the larger prime: |rsa->iqmp| is already q^-1 mod p; just
          // convert it to Montgomery form.
          ok = inv_small_mod_large_mont != NULL &&
               BN_to_montgomery(inv_small_mod_large_mont, rsa->iqmp,
                                rsa->mont_p, ctx);
        }
        if (!ok) {
          BN_free(inv_small_mod_large_mont);
          goto err;
        }
        rsa->inv_small_mod_large_mont = inv_small_mod_large_mont;
      }
    }
  }

  rsa->flags |= RSA_FLAG_PRIVATE_KEY_FROZEN;
  ret = 1;

err:
  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  return ret;
}