/* Verify secp256k1_fe_sqrt against a known answer.
 * If k is NULL, the square root computation must report failure.
 * Otherwise it must succeed and the returned root must equal +k or -k. */
void test_sqrt(const secp256k1_fe_t *a, const secp256k1_fe_t *k) {
    secp256k1_fe_t root, neg_root;
    int have_root = secp256k1_fe_sqrt(&root, a);

    /* A root exists exactly when a known answer was supplied. */
    CHECK((have_root == 0) == (k == NULL));
    if (k == NULL) {
        return;
    }

    /* root == -k iff root + k == 0; root == +k iff -root + k == 0. */
    secp256k1_fe_negate(&neg_root, &root, 1);
    secp256k1_fe_add(&root, k);
    secp256k1_fe_add(&neg_root, k);
    secp256k1_fe_normalize(&root);
    secp256k1_fe_normalize(&neg_root);
    CHECK(secp256k1_fe_is_zero(&root) || secp256k1_fe_is_zero(&neg_root));
}
/* Benchmark body: normalize the same field element 2,000,000 times. */
void bench_field_normalize(void* arg) {
    bench_inv_t *data = (bench_inv_t*)arg;
    int iter;

    for (iter = 2000000; iter > 0; iter--) {
        secp256k1_fe_normalize(&data->fe_x);
    }
}
/* Randomize the internal magnitude of a field element without changing its
 * value: each pair of negations cancels out numerically while raising the
 * magnitude bound passed to secp256k1_fe_negate. */
void random_field_element_magnitude(secp256k1_fe_t *fe) {
    int round, rounds;

    secp256k1_fe_normalize(fe);
    rounds = secp256k1_rand32() % 4;
    for (round = 0; round < rounds; round++) {
        /* The magnitude arguments track the element's current bound. */
        secp256k1_fe_negate(fe, fe, 1 + 2*round);
        secp256k1_fe_negate(fe, fe, 2 + 2*round);
    }
}
/* Compute the ECDSA r component for nonce k over the small exhaustive-test
 * group: r = x-coordinate of group[k mod order], reduced into a scalar. */
void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
    unsigned char xbytes[32];
    secp256k1_fe x;

    x = group[k % EXHAUSTIVE_TEST_ORDER].x;
    secp256k1_fe_normalize(&x);
    secp256k1_fe_get_b32(xbytes, &x);
    secp256k1_scalar_set_b32(r, xbytes, NULL);
}
/* Fill *nz with a random non-zero field element, retrying up to 10 times. */
void random_fe_non_zero(secp256k1_fe_t *nz) {
    int attempt;

    for (attempt = 0; attempt < 10; attempt++) {
        random_fe(nz);
        secp256k1_fe_normalize(nz);
        if (!secp256k1_fe_is_zero(nz)) {
            break;
        }
    }
    /* Infinitesimal probability of spurious failure here */
    CHECK(attempt < 10);
}
/* Exercise secp256k1_fe_sqr on -(2^i) for i = 1..512. */
void run_sqr(void) {
    secp256k1_fe_t x, s;
    int i;

    /* Start from x = -1, then repeatedly double and square. */
    secp256k1_fe_set_int(&x, 1);
    secp256k1_fe_negate(&x, &x, 1);
    for (i = 1; i <= 512; ++i) {
        secp256k1_fe_mul_int(&x, 2);
        secp256k1_fe_normalize(&x);
        secp256k1_fe_sqr(&s, &x);
    }
}
/* Group-element addition cross-check (variant with hex serialization).
 * Builds a random point a, a second random point b, the point at infinity i,
 * and n = -a, then verifies that the variable-time gej+gej adds, the
 * variable-time gej+ge adds, and the constant-time gej+ge adds all agree,
 * and that a + (-a) yields infinity under every variant.
 * NOTE(review): a second definition of test_ge appears later in this file;
 * only one can be compiled into a single translation unit — confirm which
 * one is intended to be kept. */
void test_ge(void) {
    char ca[135];
    char cb[68];
    int rlen;
    secp256k1_ge_t a, b, i, n;
    random_group_element_test(&a);
    random_group_element_test(&b);
    /* Serialize a into a sufficiently large buffer and sanity-check the
     * reported length. */
    rlen = sizeof(ca);
    secp256k1_ge_get_hex(ca,&rlen,&a);
    CHECK(rlen > 4 && rlen <= (int)sizeof(ca));
    rlen = sizeof(cb);
    secp256k1_ge_get_hex(cb,&rlen,&b); /* Intentionally undersized buffer. */
    /* n = -a (negate y only); i = point at infinity. */
    n = a;
    secp256k1_fe_normalize(&a.y);
    secp256k1_fe_negate(&n.y, &a.y, 1);
    secp256k1_ge_set_infinity(&i);
    /* Randomize magnitudes so the add routines see non-normalized inputs. */
    random_field_element_magnitude(&a.x);
    random_field_element_magnitude(&a.y);
    random_field_element_magnitude(&b.x);
    random_field_element_magnitude(&b.y);
    random_field_element_magnitude(&n.x);
    random_field_element_magnitude(&n.y);
    /* Jacobian counterparts of a, b, infinity and -a, also with randomized
     * magnitudes on every coordinate. */
    secp256k1_gej_t aj, bj, ij, nj;
    random_group_element_jacobian_test(&aj, &a);
    random_group_element_jacobian_test(&bj, &b);
    secp256k1_gej_set_infinity(&ij);
    random_group_element_jacobian_test(&nj, &n);
    random_field_element_magnitude(&aj.x);
    random_field_element_magnitude(&aj.y);
    random_field_element_magnitude(&aj.z);
    random_field_element_magnitude(&bj.x);
    random_field_element_magnitude(&bj.y);
    random_field_element_magnitude(&bj.z);
    random_field_element_magnitude(&nj.x);
    random_field_element_magnitude(&nj.y);
    random_field_element_magnitude(&nj.z);
    /* gej + gej adds */
    secp256k1_gej_t aaj; secp256k1_gej_add_var(&aaj, &aj, &aj);
    secp256k1_gej_t abj; secp256k1_gej_add_var(&abj, &aj, &bj);
    secp256k1_gej_t aij; secp256k1_gej_add_var(&aij, &aj, &ij);
    secp256k1_gej_t anj; secp256k1_gej_add_var(&anj, &aj, &nj);
    secp256k1_gej_t iaj; secp256k1_gej_add_var(&iaj, &ij, &aj);
    secp256k1_gej_t iij; secp256k1_gej_add_var(&iij, &ij, &ij);
    /* gej + ge adds */
    secp256k1_gej_t aa; secp256k1_gej_add_ge_var(&aa, &aj, &a);
    secp256k1_gej_t ab; secp256k1_gej_add_ge_var(&ab, &aj, &b);
    secp256k1_gej_t ai; secp256k1_gej_add_ge_var(&ai, &aj, &i);
    secp256k1_gej_t an; secp256k1_gej_add_ge_var(&an, &aj, &n);
    secp256k1_gej_t ia; secp256k1_gej_add_ge_var(&ia, &ij, &a);
    secp256k1_gej_t ii; secp256k1_gej_add_ge_var(&ii, &ij, &i);
    /* const gej + ge adds */
    secp256k1_gej_t aac; secp256k1_gej_add_ge(&aac, &aj, &a);
    secp256k1_gej_t abc; secp256k1_gej_add_ge(&abc, &aj, &b);
    secp256k1_gej_t anc; secp256k1_gej_add_ge(&anc, &aj, &n);
    secp256k1_gej_t iac; secp256k1_gej_add_ge(&iac, &ij, &a);
    /* a + (-a) must be infinity under all three add variants. */
    CHECK(secp256k1_gej_is_infinity(&an));
    CHECK(secp256k1_gej_is_infinity(&anj));
    CHECK(secp256k1_gej_is_infinity(&anc));
    /* All variants computing the same sum must agree. */
    gej_equals_gej(&aa, &aaj);
    gej_equals_gej(&aa, &aac);
    gej_equals_gej(&ab, &abj);
    gej_equals_gej(&ab, &abc);
    gej_equals_gej(&an, &anj);
    gej_equals_gej(&an, &anc);
    gej_equals_gej(&ia, &iaj);
    gej_equals_gej(&ai, &aij);
    gej_equals_gej(&ii, &iij);
    /* Adding infinity must leave a unchanged.
     * NOTE(review): the two pairs below are duplicated verbatim; possibly one
     * of each pair was meant to check &ia — confirm against upstream. */
    ge_equals_gej(&a, &ai);
    ge_equals_gej(&a, &ai);
    ge_equals_gej(&a, &iaj);
    ge_equals_gej(&a, &iaj);
    ge_equals_gej(&a, &iac);
}
/* Return nonzero iff a and b represent the same field element value
 * (both are normalized copies before comparison; inputs are untouched). */
int check_fe_equal(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
    secp256k1_fe_t lhs = *a;
    secp256k1_fe_t rhs = *b;

    secp256k1_fe_normalize(&lhs);
    secp256k1_fe_normalize(&rhs);
    return secp256k1_fe_equal(&lhs, &rhs);
}
/* Group-element addition cross-check (variant without hex serialization).
 * Builds a random point a, a second random point b, the point at infinity i,
 * and n = -a, then verifies that the variable-time gej+gej adds, the
 * variable-time gej+ge adds, and the constant-time gej+ge adds all agree,
 * and that a + (-a) yields infinity under every variant.
 * NOTE(review): this duplicates an earlier definition of test_ge in this
 * file; only one can be compiled into a single translation unit — confirm
 * which one is intended to be kept. */
void test_ge(void) {
    secp256k1_ge_t a, b, i, n;
    random_group_element_test(&a);
    random_group_element_test(&b);
    /* n = -a (negate y only); i = point at infinity. */
    n = a;
    secp256k1_fe_normalize(&a.y);
    secp256k1_fe_negate(&n.y, &a.y, 1);
    secp256k1_ge_set_infinity(&i);
    /* Randomize magnitudes so the add routines see non-normalized inputs. */
    random_field_element_magnitude(&a.x);
    random_field_element_magnitude(&a.y);
    random_field_element_magnitude(&b.x);
    random_field_element_magnitude(&b.y);
    random_field_element_magnitude(&n.x);
    random_field_element_magnitude(&n.y);
    /* Jacobian counterparts of a, b, infinity and -a, also with randomized
     * magnitudes on every coordinate. */
    secp256k1_gej_t aj, bj, ij, nj;
    random_group_element_jacobian_test(&aj, &a);
    random_group_element_jacobian_test(&bj, &b);
    secp256k1_gej_set_infinity(&ij);
    random_group_element_jacobian_test(&nj, &n);
    random_field_element_magnitude(&aj.x);
    random_field_element_magnitude(&aj.y);
    random_field_element_magnitude(&aj.z);
    random_field_element_magnitude(&bj.x);
    random_field_element_magnitude(&bj.y);
    random_field_element_magnitude(&bj.z);
    random_field_element_magnitude(&nj.x);
    random_field_element_magnitude(&nj.y);
    random_field_element_magnitude(&nj.z);
    /* gej + gej adds */
    secp256k1_gej_t aaj; secp256k1_gej_add_var(&aaj, &aj, &aj);
    secp256k1_gej_t abj; secp256k1_gej_add_var(&abj, &aj, &bj);
    secp256k1_gej_t aij; secp256k1_gej_add_var(&aij, &aj, &ij);
    secp256k1_gej_t anj; secp256k1_gej_add_var(&anj, &aj, &nj);
    secp256k1_gej_t iaj; secp256k1_gej_add_var(&iaj, &ij, &aj);
    secp256k1_gej_t iij; secp256k1_gej_add_var(&iij, &ij, &ij);
    /* gej + ge adds */
    secp256k1_gej_t aa; secp256k1_gej_add_ge_var(&aa, &aj, &a);
    secp256k1_gej_t ab; secp256k1_gej_add_ge_var(&ab, &aj, &b);
    secp256k1_gej_t ai; secp256k1_gej_add_ge_var(&ai, &aj, &i);
    secp256k1_gej_t an; secp256k1_gej_add_ge_var(&an, &aj, &n);
    secp256k1_gej_t ia; secp256k1_gej_add_ge_var(&ia, &ij, &a);
    secp256k1_gej_t ii; secp256k1_gej_add_ge_var(&ii, &ij, &i);
    /* const gej + ge adds */
    secp256k1_gej_t aac; secp256k1_gej_add_ge(&aac, &aj, &a);
    secp256k1_gej_t abc; secp256k1_gej_add_ge(&abc, &aj, &b);
    secp256k1_gej_t anc; secp256k1_gej_add_ge(&anc, &aj, &n);
    secp256k1_gej_t iac; secp256k1_gej_add_ge(&iac, &ij, &a);
    /* a + (-a) must be infinity under all three add variants. */
    CHECK(secp256k1_gej_is_infinity(&an));
    CHECK(secp256k1_gej_is_infinity(&anj));
    CHECK(secp256k1_gej_is_infinity(&anc));
    /* All variants computing the same sum must agree. */
    gej_equals_gej(&aa, &aaj);
    gej_equals_gej(&aa, &aac);
    gej_equals_gej(&ab, &abj);
    gej_equals_gej(&ab, &abc);
    gej_equals_gej(&an, &anj);
    gej_equals_gej(&an, &anc);
    gej_equals_gej(&ia, &iaj);
    gej_equals_gej(&ai, &aij);
    gej_equals_gej(&ii, &iij);
    /* Adding infinity must leave a unchanged.
     * NOTE(review): the two pairs below are duplicated verbatim; possibly one
     * of each pair was meant to check &ia — confirm against upstream. */
    ge_equals_gej(&a, &ai);
    ge_equals_gej(&a, &ai);
    ge_equals_gej(&a, &iaj);
    ge_equals_gej(&a, &iaj);
    ge_equals_gej(&a, &iac);
}
/* Exhaustively test recoverable signing over a tiny group: for every
 * (message i, key j, nonce k) triple, sign, then verify the signature
 * components and the recovery id directly against the group table.
 * Relies on scalar_low (scalars are small ints), which is why scalars are
 * compared with ==. Note the nonce function may advance k during signing. */
void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
    int i, j, k;
    /* Loop */
    for (i = 1; i < order; i++) { /* message */
        for (j = 1; j < order; j++) { /* key */
            for (k = 1; k < order; k++) { /* nonce */
                const int starting_k = k; /* detect nonce wrap-around below */
                secp256k1_fe r_dot_y_normalized;
                secp256k1_ecdsa_recoverable_signature rsig;
                secp256k1_ecdsa_signature sig;
                secp256k1_scalar sk, msg, r, s, expected_r;
                unsigned char sk32[32], msg32[32];
                int expected_recid;
                int recid;
                secp256k1_scalar_set_int(&msg, i);
                secp256k1_scalar_set_int(&sk, j);
                secp256k1_scalar_get_b32(sk32, &sk);
                secp256k1_scalar_get_b32(msg32, &msg);
                /* The smallint nonce function reads and may update k. */
                secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
                /* Check directly */
                secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
                r_from_k(&expected_r, group, k);
                CHECK(r == expected_r);
                /* ECDSA relation: k*s == msg + r*key, allowing the low-s
                 * negated form of s. */
                CHECK((k * s) % order == (i + r * j) % order ||
                      (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
                /* In computing the recid, there is an overflow condition that is disabled in
                 * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
                 * will exceed the group order, and our signing code always holds out for r
                 * values that don't overflow, so with a proper overflow check the tests would
                 * loop indefinitely. */
                r_dot_y_normalized = group[k].y;
                secp256k1_fe_normalize(&r_dot_y_normalized);
                /* Also the recovery id is flipped depending if we hit the low-s branch */
                if ((k * s) % order == (i + r * j) % order) {
                    expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
                } else {
                    expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
                }
                CHECK(recid == expected_recid);
                /* Convert to a standard sig then check */
                secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
                secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
                /* Note that we compute expected_r *after* signing -- this is important
                 * because our nonce-computing function function might change k during
                 * signing. */
                r_from_k(&expected_r, group, k);
                CHECK(r == expected_r);
                CHECK((k * s) % order == (i + r * j) % order ||
                      (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
                /* Overflow means we've tried every possible nonce */
                if (k < starting_k) {
                    break;
                }
            }
        }
    }
}