void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
    /* This is essentially a copy of test_exhaustive_verify, with recovery added */
    int s, r, msg, key;
    for (s = 1; s < order; s++) {
        for (r = 1; r < order; r++) {
            for (msg = 1; msg < order; msg++) {
                for (key = 1; key < order; key++) {
                    secp256k1_ge nonconst_ge;
                    secp256k1_ecdsa_recoverable_signature rsig;
                    secp256k1_ecdsa_signature sig;
                    secp256k1_pubkey pk;
                    secp256k1_scalar sk_s, msg_s, r_s, s_s;
                    secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
                    int recid = 0;
                    int k, should_verify;
                    unsigned char msg32[32];

                    secp256k1_scalar_set_int(&s_s, s);
                    secp256k1_scalar_set_int(&r_s, r);
                    secp256k1_scalar_set_int(&msg_s, msg);
                    secp256k1_scalar_set_int(&sk_s, key);
                    secp256k1_scalar_get_b32(msg32, &msg_s);

                    /* Verify by hand */
                    /* Run through every k value that gives us this r and check that *one* works.
                     * Note there could be none, there could be multiple, ECDSA is weird. */
                    should_verify = 0;
                    for (k = 0; k < order; k++) {
                        secp256k1_scalar check_x_s;
                        r_from_k(&check_x_s, group, k);
                        if (r_s == check_x_s) {
                            secp256k1_scalar_set_int(&s_times_k_s, k);
                            secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
                            secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
                            secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
                            should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
                        }
                    }
                    /* nb we have a "high s" rule */
                    should_verify &= !secp256k1_scalar_is_high(&s_s);

                    /* We would like to try recovering the pubkey and checking that it matches,
                     * but pubkey recovery is impossible in the exhaustive tests (the reason
                     * being that there are 12 nonzero r values, 12 nonzero points, and no
                     * overlap between the sets, so there are no valid signatures). */

                    /* Verify by converting to a standard signature and calling verify */
                    secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
                    secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
                    memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
                    secp256k1_pubkey_save(&pk, &nonconst_ge);
                    CHECK(should_verify == secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
                }
            }
        }
    }
}
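/*
 * Caller-side sketch, not taken from the test above: with the recovery module's
 * public API, a 65-byte "compact r||s plus recid" signature (the recid-as-65th-byte
 * layout is an assumed convention here) can be used to recover the signer's public
 * key. sig65 and msg_hash are placeholder buffers.
 */
#include <secp256k1.h>
#include <secp256k1_recovery.h>

int recover_pubkey_example(const unsigned char sig65[65], const unsigned char msg_hash[32],
                           secp256k1_pubkey *pubkey_out) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
    secp256k1_ecdsa_recoverable_signature rsig;
    /* Parse the 64-byte compact body with its recovery id, then recover the pubkey. */
    int ok = secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig65, sig65[64])
          && secp256k1_ecdsa_recover(ctx, pubkey_out, &rsig, msg_hash);
    secp256k1_context_destroy(ctx);
    return ok;
}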
void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
    int s, r, msg, key;
    for (s = 1; s < order; s++) {
        for (r = 1; r < order; r++) {
            for (msg = 1; msg < order; msg++) {
                for (key = 1; key < order; key++) {
                    secp256k1_ge nonconst_ge;
                    secp256k1_ecdsa_signature sig;
                    secp256k1_pubkey pk;
                    secp256k1_scalar sk_s, msg_s, r_s, s_s;
                    secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
                    int k, should_verify;
                    unsigned char msg32[32];

                    secp256k1_scalar_set_int(&s_s, s);
                    secp256k1_scalar_set_int(&r_s, r);
                    secp256k1_scalar_set_int(&msg_s, msg);
                    secp256k1_scalar_set_int(&sk_s, key);

                    /* Verify by hand */
                    /* Run through every k value that gives us this r and check that *one* works.
                     * Note there could be none, there could be multiple, ECDSA is weird. */
                    should_verify = 0;
                    for (k = 0; k < order; k++) {
                        secp256k1_scalar check_x_s;
                        r_from_k(&check_x_s, group, k);
                        if (r_s == check_x_s) {
                            secp256k1_scalar_set_int(&s_times_k_s, k);
                            secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
                            secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
                            secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
                            should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
                        }
                    }
                    /* nb we have a "high s" rule */
                    should_verify &= !secp256k1_scalar_is_high(&s_s);

                    /* Verify by calling verify */
                    secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s);
                    memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
                    secp256k1_pubkey_save(&pk, &nonconst_ge);
                    secp256k1_scalar_get_b32(msg32, &msg_s);
                    CHECK(should_verify == secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
                }
            }
        }
    }
}
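/*
 * Illustration only (not library code): the algebraic check that the "verify by hand"
 * loops above perform. A signature (r, s) on message m under key x verifies when, for
 * some nonce k whose R-point x-coordinate reduces to r,
 *
 *     s * k == m + r * x   (mod order)
 *
 * which is the rearranged textbook signing equation s = k^-1 * (m + r*x) mod order.
 * Inputs are assumed to be already reduced modulo a small toy order, as in the
 * exhaustive tests; r_from_k() is what would supply the candidate k values.
 */
int toy_ecdsa_relation_holds(unsigned int s, unsigned int r, unsigned int m,
                             unsigned int x, unsigned int k, unsigned int order) {
    return (s * k) % order == (m + r * x) % order;
}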
int secp256k1_ecdsa_verify(const secp256k1_context* ctx, const secp256k1_ecdsa_signature *sig, const unsigned char *msg32, const secp256k1_pubkey *pubkey) {
    secp256k1_ge q;
    secp256k1_scalar r, s;
    secp256k1_scalar m;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
    ARG_CHECK(msg32 != NULL);
    ARG_CHECK(sig != NULL);
    ARG_CHECK(pubkey != NULL);

    secp256k1_scalar_set_b32(&m, msg32, NULL);
    secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
    return (!secp256k1_scalar_is_high(&s) &&
            secp256k1_pubkey_load(ctx, &q, pubkey) &&
            secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m));
}
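/*
 * Caller-side sketch of typical usage, not part of the implementation above:
 * parse serialized inputs with the public API, then verify. pubkey_ser (33 or
 * 65 bytes), sig_der, and msg_hash are placeholder buffers.
 */
#include <stddef.h>
#include <secp256k1.h>

int verify_example(const unsigned char *pubkey_ser, size_t pubkey_len,
                   const unsigned char *sig_der, size_t sig_len,
                   const unsigned char msg_hash[32]) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
    secp256k1_pubkey pubkey;
    secp256k1_ecdsa_signature sig;
    int ok = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkey_ser, pubkey_len)
          && secp256k1_ecdsa_signature_parse_der(ctx, &sig, sig_der, sig_len)
          && secp256k1_ecdsa_verify(ctx, &sig, msg_hash, &pubkey);
    secp256k1_context_destroy(ctx);
    return ok;
}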
int secp256k1_ecdsa_signature_normalize(const secp256k1_context* ctx, secp256k1_ecdsa_signature *sigout, const secp256k1_ecdsa_signature *sigin) {
    secp256k1_scalar r, s;
    int ret = 0;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(sigin != NULL);

    secp256k1_ecdsa_signature_load(ctx, &r, &s, sigin);
    ret = secp256k1_scalar_is_high(&s);
    if (sigout != NULL) {
        if (ret) {
            secp256k1_scalar_negate(&s, &s);
        }
        secp256k1_ecdsa_signature_save(sigout, &r, &s);
    }

    return ret;
}
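/*
 * Usage sketch, inferred from the two functions above rather than copied from
 * the source: verify() rejects high-S signatures, so a caller that must accept
 * both encodings can normalize first. Passing NULL for sigout would only report
 * whether the input was high-S, without writing a normalized copy.
 */
#include <secp256k1.h>

int verify_allowing_high_s(const secp256k1_context *ctx,
                           const secp256k1_ecdsa_signature *sig,
                           const unsigned char msg_hash[32],
                           const secp256k1_pubkey *pubkey) {
    secp256k1_ecdsa_signature sig_low;
    /* Returns 1 if *sig was high-S; sig_low always holds the low-S form. */
    secp256k1_ecdsa_signature_normalize(ctx, &sig_low, sig);
    return secp256k1_ecdsa_verify(ctx, &sig_low, msg_hash, pubkey);
}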
void scalar_test(void) {
    unsigned char c[32];

    /* Set 's' to a random scalar, with value 'snum'. */
    secp256k1_scalar_t s;
    random_scalar_order_test(&s);

    /* Set 's1' to a random scalar, with value 's1num'. */
    secp256k1_scalar_t s1;
    random_scalar_order_test(&s1);

    /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */
    secp256k1_scalar_t s2;
    random_scalar_order_test(&s2);
    secp256k1_scalar_get_b32(c, &s2);

#ifndef USE_NUM_NONE
    secp256k1_num_t snum, s1num, s2num;
    secp256k1_scalar_get_num(&snum, &s);
    secp256k1_scalar_get_num(&s1num, &s1);
    secp256k1_scalar_get_num(&s2num, &s2);

    secp256k1_num_t order;
    secp256k1_scalar_order_get_num(&order);
    secp256k1_num_t half_order = order;
    secp256k1_num_shift(&half_order, 1);
#endif

    {
        /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */
        secp256k1_scalar_t n;
        secp256k1_scalar_set_int(&n, 0);
        for (int i = 0; i < 256; i += 4) {
            secp256k1_scalar_t t;
            secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4));
            for (int j = 0; j < 4; j++) {
                secp256k1_scalar_add(&n, &n, &n);
            }
            secp256k1_scalar_add(&n, &n, &t);
        }
        CHECK(secp256k1_scalar_eq(&n, &s));
    }

    {
        /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */
        secp256k1_scalar_t n;
        secp256k1_scalar_set_int(&n, 0);
        int i = 0;
        while (i < 256) {
            int now = (secp256k1_rand32() % 15) + 1;
            if (now + i > 256) {
                now = 256 - i;
            }
            secp256k1_scalar_t t;
            secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits_var(&s, 256 - now - i, now));
            for (int j = 0; j < now; j++) {
                secp256k1_scalar_add(&n, &n, &n);
            }
            secp256k1_scalar_add(&n, &n, &t);
            i += now;
        }
        CHECK(secp256k1_scalar_eq(&n, &s));
    }

#ifndef USE_NUM_NONE
    {
        /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */
        secp256k1_num_t rnum;
        secp256k1_num_add(&rnum, &snum, &s2num);
        secp256k1_num_mod(&rnum, &order);
        secp256k1_scalar_t r;
        secp256k1_scalar_add(&r, &s, &s2);
        secp256k1_num_t r2num;
        secp256k1_scalar_get_num(&r2num, &r);
        CHECK(secp256k1_num_eq(&rnum, &r2num));
    }

    {
        /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */
        secp256k1_num_t rnum;
        secp256k1_num_mul(&rnum, &snum, &s2num);
        secp256k1_num_mod(&rnum, &order);
        secp256k1_scalar_t r;
        secp256k1_scalar_mul(&r, &s, &s2);
        secp256k1_num_t r2num;
        secp256k1_scalar_get_num(&r2num, &r);
        CHECK(secp256k1_num_eq(&rnum, &r2num));
        /* The result can only be zero if at least one of the factors was zero. */
        CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2)));
        /* The results can only be equal to one of the factors if that factor was zero, or the other factor was one. */
        CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2)));
        CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s)));
    }

    {
        /* Check that comparison with zero matches comparison with zero on the number. */
        CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s));
        /* Check that comparison with the half order is equal to testing for high scalar. */
        CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &half_order) > 0));
        secp256k1_scalar_t neg;
        secp256k1_scalar_negate(&neg, &s);
        secp256k1_num_t negnum;
        secp256k1_num_sub(&negnum, &order, &snum);
        secp256k1_num_mod(&negnum, &order);
        /* Check that comparison with the half order is equal to testing for high scalar after negation. */
        CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &half_order) > 0));
        /* Negating should change the high property, unless the value was already zero. */
        CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s));
        secp256k1_num_t negnum2;
        secp256k1_scalar_get_num(&negnum2, &neg);
        /* Negating a scalar should be equal to (order - n) mod order on the number. */
        CHECK(secp256k1_num_eq(&negnum, &negnum2));
        secp256k1_scalar_add(&neg, &neg, &s);
        /* Adding a number to its negation should result in zero. */
        CHECK(secp256k1_scalar_is_zero(&neg));
        secp256k1_scalar_negate(&neg, &neg);
        /* Negating zero should still result in zero. */
        CHECK(secp256k1_scalar_is_zero(&neg));
    }

    {
        /* Test secp256k1_scalar_mul_shift_var. */
        secp256k1_scalar_t r;
        unsigned int shift = 256 + (secp256k1_rand32() % 257);
        secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
        secp256k1_num_t rnum;
        secp256k1_num_mul(&rnum, &s1num, &s2num);
        secp256k1_num_shift(&rnum, shift - 1);
        secp256k1_num_t one;
        unsigned char cone[1] = {0x01};
        secp256k1_num_set_bin(&one, cone, 1);
        secp256k1_num_add(&rnum, &rnum, &one);
        secp256k1_num_shift(&rnum, 1);
        secp256k1_num_t rnum2;
        secp256k1_scalar_get_num(&rnum2, &r);
        CHECK(secp256k1_num_eq(&rnum, &rnum2));
    }
#endif

    {
        /* Test that scalar inverses are equal to the inverse of their number modulo the order. */
        if (!secp256k1_scalar_is_zero(&s)) {
            secp256k1_scalar_t inv;
            secp256k1_scalar_inverse(&inv, &s);
#ifndef USE_NUM_NONE
            secp256k1_num_t invnum;
            secp256k1_num_mod_inverse(&invnum, &snum, &order);
            secp256k1_num_t invnum2;
            secp256k1_scalar_get_num(&invnum2, &inv);
            CHECK(secp256k1_num_eq(&invnum, &invnum2));
#endif
            secp256k1_scalar_mul(&inv, &inv, &s);
            /* Multiplying a scalar with its inverse must result in one. */
            CHECK(secp256k1_scalar_is_one(&inv));
            secp256k1_scalar_inverse(&inv, &inv);
            /* Inverting one must result in one. */
            CHECK(secp256k1_scalar_is_one(&inv));
        }
    }

    {
        /* Test commutativity of add. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_add(&r2, &s2, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test add_bit. */
        int bit = secp256k1_rand32() % 256;
        secp256k1_scalar_t b;
        secp256k1_scalar_set_int(&b, 1);
        CHECK(secp256k1_scalar_is_one(&b));
        for (int i = 0; i < bit; i++) {
            secp256k1_scalar_add(&b, &b, &b);
        }
        secp256k1_scalar_t r1 = s1, r2 = s1;
        if (!secp256k1_scalar_add(&r1, &r1, &b)) {
            /* No overflow happened. */
            secp256k1_scalar_add_bit(&r2, bit);
            CHECK(secp256k1_scalar_eq(&r1, &r2));
        }
    }

    {
        /* Test commutativity of mul. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_mul(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r2, &s2, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test associativity of add. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_add(&r1, &r1, &s);
        secp256k1_scalar_add(&r2, &s2, &s);
        secp256k1_scalar_add(&r2, &s1, &r2);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test associativity of mul. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_mul(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r1, &r1, &s);
        secp256k1_scalar_mul(&r2, &s2, &s);
        secp256k1_scalar_mul(&r2, &s1, &r2);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test distributivity of mul over add. */
        secp256k1_scalar_t r1, r2, t;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r1, &r1, &s);
        secp256k1_scalar_mul(&r2, &s1, &s);
        secp256k1_scalar_mul(&t, &s2, &s);
        secp256k1_scalar_add(&r2, &r2, &t);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test square. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_sqr(&r1, &s1);
        secp256k1_scalar_mul(&r2, &s1, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }
}
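/*
 * Illustration only (plain C, not library code): the n(i) = 16*n(i-1) + p(i)
 * reconstruction checked by the first block of scalar_test above, shown on a
 * 64-bit value instead of a 256-bit scalar.
 */
#include <stdint.h>
#include <assert.h>

void nibble_reconstruction_demo(void) {
    uint64_t s = 0x0123456789abcdefULL;
    uint64_t n = 0;
    int i;
    for (i = 0; i < 64; i += 4) {
        /* Fetch 4 bits at a time, most significant group first. */
        unsigned int p = (unsigned int)((s >> (64 - 4 - i)) & 0xF);
        n = (n << 4) | p; /* n(i) = 16*n(i-1) + p(i) */
    }
    assert(n == s);
}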
void scalar_test(void) {
    unsigned char c[32];

    /* Set 's' to a random scalar, with value 'snum'. */
    secp256k1_rand256_test(c);
    secp256k1_scalar_t s;
    secp256k1_scalar_set_b32(&s, c, NULL);
    secp256k1_num_t snum;
    secp256k1_num_set_bin(&snum, c, 32);
    secp256k1_num_mod(&snum, &secp256k1_ge_consts->order);

    /* Set 's1' to a random scalar, with value 's1num'. */
    secp256k1_rand256_test(c);
    secp256k1_scalar_t s1;
    secp256k1_scalar_set_b32(&s1, c, NULL);
    secp256k1_num_t s1num;
    secp256k1_num_set_bin(&s1num, c, 32);
    secp256k1_num_mod(&s1num, &secp256k1_ge_consts->order);

    /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */
    secp256k1_rand256_test(c);
    secp256k1_scalar_t s2;
    int overflow = 0;
    secp256k1_scalar_set_b32(&s2, c, &overflow);
    secp256k1_num_t s2num;
    secp256k1_num_set_bin(&s2num, c, 32);
    secp256k1_num_mod(&s2num, &secp256k1_ge_consts->order);

    {
        /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */
        secp256k1_num_t n, t, m;
        secp256k1_num_set_int(&n, 0);
        secp256k1_num_set_int(&m, 16);
        for (int i = 0; i < 256; i += 4) {
            secp256k1_num_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4));
            secp256k1_num_mul(&n, &n, &m);
            secp256k1_num_add(&n, &n, &t);
        }
        CHECK(secp256k1_num_eq(&n, &snum));
    }

    {
        /* Test that get_b32 returns the same as get_bin on the number. */
        unsigned char r1[32];
        secp256k1_scalar_get_b32(r1, &s2);
        unsigned char r2[32];
        secp256k1_num_get_bin(r2, 32, &s2num);
        CHECK(memcmp(r1, r2, 32) == 0);
        /* If no overflow occurred when assigning, it should also be equal to the original byte array. */
        CHECK((memcmp(r1, c, 32) == 0) == (overflow == 0));
    }

    {
        /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */
        secp256k1_num_t rnum;
        secp256k1_num_add(&rnum, &snum, &s2num);
        secp256k1_num_mod(&rnum, &secp256k1_ge_consts->order);
        secp256k1_scalar_t r;
        secp256k1_scalar_add(&r, &s, &s2);
        secp256k1_num_t r2num;
        secp256k1_scalar_get_num(&r2num, &r);
        CHECK(secp256k1_num_eq(&rnum, &r2num));
    }

    {
        /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */
        secp256k1_num_t rnum;
        secp256k1_num_mul(&rnum, &snum, &s2num);
        secp256k1_num_mod(&rnum, &secp256k1_ge_consts->order);
        secp256k1_scalar_t r;
        secp256k1_scalar_mul(&r, &s, &s2);
        secp256k1_num_t r2num;
        secp256k1_scalar_get_num(&r2num, &r);
        CHECK(secp256k1_num_eq(&rnum, &r2num));
        /* The result can only be zero if at least one of the factors was zero. */
        CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2)));
        /* The results can only be equal to one of the factors if that factor was zero, or the other factor was one. */
        CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2)));
        CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s)));
    }

    {
        /* Check that comparison with zero matches comparison with zero on the number. */
        CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s));
        /* Check that comparison with the half order is equal to testing for high scalar. */
        CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &secp256k1_ge_consts->half_order) > 0));
        secp256k1_scalar_t neg;
        secp256k1_scalar_negate(&neg, &s);
        secp256k1_num_t negnum;
        secp256k1_num_sub(&negnum, &secp256k1_ge_consts->order, &snum);
        secp256k1_num_mod(&negnum, &secp256k1_ge_consts->order);
        /* Check that comparison with the half order is equal to testing for high scalar after negation. */
        CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &secp256k1_ge_consts->half_order) > 0));
        /* Negating should change the high property, unless the value was already zero. */
        CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s));
        secp256k1_num_t negnum2;
        secp256k1_scalar_get_num(&negnum2, &neg);
        /* Negating a scalar should be equal to (order - n) mod order on the number. */
        CHECK(secp256k1_num_eq(&negnum, &negnum2));
        secp256k1_scalar_add(&neg, &neg, &s);
        /* Adding a number to its negation should result in zero. */
        CHECK(secp256k1_scalar_is_zero(&neg));
        secp256k1_scalar_negate(&neg, &neg);
        /* Negating zero should still result in zero. */
        CHECK(secp256k1_scalar_is_zero(&neg));
    }

    {
        /* Test that scalar inverses are equal to the inverse of their number modulo the order. */
        if (!secp256k1_scalar_is_zero(&s)) {
            secp256k1_scalar_t inv;
            secp256k1_scalar_inverse(&inv, &s);
            secp256k1_num_t invnum;
            secp256k1_num_mod_inverse(&invnum, &snum, &secp256k1_ge_consts->order);
            secp256k1_num_t invnum2;
            secp256k1_scalar_get_num(&invnum2, &inv);
            CHECK(secp256k1_num_eq(&invnum, &invnum2));
            secp256k1_scalar_mul(&inv, &inv, &s);
            /* Multiplying a scalar with its inverse must result in one. */
            CHECK(secp256k1_scalar_is_one(&inv));
            secp256k1_scalar_inverse(&inv, &inv);
            /* Inverting one must result in one. */
            CHECK(secp256k1_scalar_is_one(&inv));
        }
    }

    {
        /* Test commutativity of add. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_add(&r2, &s2, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test commutativity of mul. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_mul(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r2, &s2, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test associativity of add. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_add(&r1, &r1, &s);
        secp256k1_scalar_add(&r2, &s2, &s);
        secp256k1_scalar_add(&r2, &s1, &r2);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test associativity of mul. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_mul(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r1, &r1, &s);
        secp256k1_scalar_mul(&r2, &s2, &s);
        secp256k1_scalar_mul(&r2, &s1, &r2);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test distributivity of mul over add. */
        secp256k1_scalar_t r1, r2, t;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r1, &r1, &s);
        secp256k1_scalar_mul(&r2, &s1, &s);
        secp256k1_scalar_mul(&t, &s2, &s);
        secp256k1_scalar_add(&r2, &r2, &t);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test square. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_sqr(&r1, &s1);
        secp256k1_scalar_mul(&r2, &s1, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }
}
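/*
 * Illustration only (plain C, not library code): the negation and "high"
 * properties that both scalar_test variants above check, demonstrated on a
 * small odd toy order instead of the group order.
 */
#include <stdint.h>
#include <assert.h>

void negation_high_demo(void) {
    const uint32_t order = 13, half_order = order >> 1;
    uint32_t x;
    for (x = 0; x < order; x++) {
        uint32_t neg = (order - x) % order; /* (order - x) mod order */
        int x_high = x > half_order;
        int neg_high = neg > half_order;
        /* Negating flips the "high" property unless the value was zero. */
        assert((x_high == neg_high) == (x == 0));
        /* Adding a value to its negation gives zero. */
        assert((x + neg) % order == 0);
    }
}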