/**
 * Multiplies a quadratic extension field element by the quadratic/cubic
 * non-residue selected for the underlying prime, using lazy-reduced ("low")
 * arithmetic: (4 + u) for the 158-bit special prime, (1 + u) when
 * p = 3 mod 8, u when p = 5 mod 8, and (2^(lg_4(b-1)) + u) when p = 7 mod 8.
 *
 * @param[out] c	- the result.
 * @param[in] a		- the element to multiply.
 */
void fp2_norm_low(fp2_t c, fp2_t a) {
	fp2_t t;
	bn_t b;

	fp2_null(t);
	bn_null(b);

	TRY {
		fp2_new(t);
		bn_new(b);
#if FP_PRIME == 158
		/* (a_0 + a_1 * u) * (4 + u) = (4a_0 - a_1) + (a_0 + 4a_1) * u. */
		fp_dbl(t[0], a[0]);
		fp_dbl(t[0], t[0]);
		fp_sub(t[0], t[0], a[1]);
		fp_dbl(t[1], a[1]);
		fp_dbl(t[1], t[1]);
		fp_add(c[1], a[0], t[1]);
		fp_copy(c[0], t[0]);
#elif defined(FP_QNRES)
		/* If p = 3 mod 8, (1 + i) is a QNR/CNR. */
		fp_neg(t[0], a[1]);
		fp_add(c[1], a[0], a[1]);
		fp_add(c[0], t[0], a[0]);
#else
		switch (fp_prime_get_mod8()) {
			case 3:
				/* If p = 3 mod 8, (1 + u) is a QNR/CNR. */
				fp_neg(t[0], a[1]);
				fp_add(c[1], a[0], a[1]);
				fp_add(c[0], t[0], a[0]);
				break;
			case 5:
				/* If p = 5 mod 8, (u) is a QNR/CNR. */
				fp2_mul_art(c, a);
				break;
			case 7:
				/* If p = 7 mod 8, we choose (2^(lg_4(b-1)) + u) as QNR/CNR. */
				fp2_mul_art(t, a);
				fp2_dbl(c, a);
				/* The doubling count is derived from the curve b coefficient. */
				fp_prime_back(b, ep_curve_get_b());
				for (int i = 1; i < bn_bits(b) / 2; i++) {
					fp2_dbl(c, c);
				}
				fp2_add(c, c, t);
				break;
			default:
				THROW(ERR_NO_VALID);
				break;
		}
#endif
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp2_free(t);
		bn_free(b);
	}
}
/**
 * Multiplies a quadratic extension field element by the adjoined root:
 * i when FP_QNRES is set (p = 3 mod 8), otherwise u with u^2 equal to the
 * configured quadratic non-residue.
 *
 * @param[out] c	- the result.
 * @param[in] a		- the element to multiply.
 */
void fp2_mul_art(fp2_t c, fp2_t a) {
	fp_t t;

	fp_null(t);

	TRY {
		fp_new(t);
#ifdef FP_QNRES
		/* (a_0 + a_1 * i) * i = -a_1 + a_0 * i. */
		/* a_0 is buffered in t so the routine also works when c aliases a. */
		fp_copy(t, a[0]);
		fp_neg(c[0], a[1]);
		fp_copy(c[1], t);
#else
		/* (a_0 + a_1 * u) * u = (a_1 * u^2) + a_0 * u. */
		fp_copy(t, a[0]);
		fp_neg(c[0], a[1]);
		/* For qnr < -1, accumulate qnr * a_1 by repeated subtraction. */
		for (int i = -1; i > fp_prime_get_qnr(); i--) {
			fp_sub(c[0], c[0], a[1]);
		}
		fp_copy(c[1], t);
#endif
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp_free(t);
	}
}
/**
 * Uncompresses a quadratic extension field element of unit norm, recovering
 * the second coordinate from a_0^2 + a_1^2 = 1 and the sign bit stored in
 * the compressed a[1].
 *
 * @param[out] c	- the uncompressed element.
 * @param[in] a		- the compressed element (a[0] full, a[1] holds one bit).
 * @return 1 if the square root exists, 0 otherwise.
 */
int fp2_upk(fp2_t c, fp2_t a) {
	/* Initialize result to failure so the return value is defined even if an
	 * exception is thrown before fp_srt() runs (matches ep_upk). */
	int result = 0, b = fp_get_bit(a[1], 0);
	fp_t t;

	fp_null(t);

	TRY {
		fp_new(t);

		/* a_0^2 + a_1^2 = 1, thus a_1^2 = 1 - a_0^2. */
		fp_sqr(t, a[0]);
		fp_sub_dig(t, t, 1);
		fp_neg(t, t);

		/* a_1 = sqrt(1 - a_0^2). */
		result = fp_srt(t, t);
		if (result) {
			/* Verify if least significant bit of the result matches the
			 * compressed second coordinate. */
			if (fp_get_bit(t, 0) != b) {
				fp_neg(t, t);
			}
			fp_copy(c[0], a[0]);
			fp_copy(c[1], t);
		}
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp_free(t);
	}
	return result;
}
/**
 * Negates a twisted Edwards curve point: (x, y, z) -> (-x, y, z). In
 * extended coordinates the auxiliary coordinate t is negated as well,
 * since t tracks the product x * y.
 *
 * @param[out] r	- the result.
 * @param[in] p		- the point to negate.
 */
void ed_neg(ed_t r, const ed_t p) {
#if ED_ADD == PROJC
	fp_neg(r->x, p->x);
	fp_copy(r->y, p->y);
	fp_copy(r->z, p->z);
#elif ED_ADD == EXTND
	fp_neg(r->x, p->x);
	fp_copy(r->y, p->y);
	fp_neg(r->t, p->t);
	fp_copy(r->z, p->z);
#endif
}
/**
 * Multiplies a quadratic extension field element by the quadratic/cubic
 * non-residue chosen for the prime: (1 + u) when p = 3 mod 8, u when
 * p = 1 or 5 mod 8, and (4 + u) when p = 7 mod 8.
 *
 * @param[out] c	- the result.
 * @param[in] a		- the element to multiply.
 */
void fp2_mul_nor_basic(fp2_t c, fp2_t a) {
	fp2_t t;
	bn_t b;

	fp2_null(t);
	bn_null(b);

	TRY {
		fp2_new(t);
		bn_new(b);
#ifdef FP_QNRES
		/* If p = 3 mod 8, (1 + i) is a QNR/CNR. */
		fp_neg(t[0], a[1]);
		fp_add(c[1], a[0], a[1]);
		fp_add(c[0], t[0], a[0]);
#else
		switch (fp_prime_get_mod8()) {
			case 3:
				/* If p = 3 mod 8, (1 + u) is a QNR/CNR. */
				fp_neg(t[0], a[1]);
				fp_add(c[1], a[0], a[1]);
				fp_add(c[0], t[0], a[0]);
				break;
			case 1:
			case 5:
				/* If p = 5 mod 8, (u) is a QNR/CNR. */
				fp2_mul_art(c, a);
				break;
			case 7:
				/* If p = 7 mod 8, we choose (4 + u) is a QNR/CNR:
				 * c = 4a + u * a. */
				fp2_mul_art(t, a);
				fp2_dbl(c, a);
				fp2_dbl(c, c);
				fp2_add(c, c, t);
				break;
			default:
				THROW(ERR_NO_VALID);
		}
#endif
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp2_free(t);
		bn_free(b);
	}
}
/**
 * Uncompresses a twisted Edwards curve point: recovers the x-coordinate from
 * y via the curve equation and matches its parity against the stored sign
 * bit of p->x.
 *
 * @param[out] r	- the uncompressed point (normalized, z = 1).
 * @param[in] p		- the compressed point (full y, sign bit in x).
 * @return 1 (the recovery is assumed to succeed; ed_recover_x throws on
 *         failure rather than returning an error code).
 */
int ed_upk(ed_t r, const ed_t p) {
	int result = 1;
	fp_t t;

	/* Nullify before TRY so fp_free() in FINALLY is safe even when fp_new()
	 * throws (the original skipped this, unlike every sibling routine). */
	fp_null(t);

	TRY {
		fp_new(t);
		fp_copy(r->y, p->y);
		ed_recover_x(t, p->y, core_get()->ed_d, core_get()->ed_a);

		/* Pick the root whose parity matches the compressed sign bit. */
		if (fp_get_bit(t, 0) != fp_get_bit(p->x, 0)) {
			fp_neg(t, t);
		}
		fp_copy(r->x, t);
#if ED_ADD == EXTND
		fp_mul(r->t, r->x, r->y);
#endif
		fp_set_dig(r->z, 1);
		r->norm = 1;
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp_free(t);
	}
	return result;
}
/**
 * Compute the Miller loop for pairings of type G_2 x G_1 over the bits of a
 * given parameter.
 *
 * @param[out] r		- the result.
 * @param[out] t		- the resulting points.
 * @param[in] q			- the vector of first arguments in affine coordinates.
 * @param[in] p			- the vector of second arguments in affine coordinates.
 * @param[in] m			- the number of pairings to evaluate.
 * @param[in] a			- the loop parameter.
 */
static void pp_mil_k12(fp12_t r, ep2_t *t, ep2_t *q, ep_t *p, int m, bn_t a) {
	fp12_t l;
	ep_t _p[m];
	int i, j;

	if (m == 0) {
		return;
	}

	fp12_null(l);

	TRY {
		fp12_new(l);
		for (j = 0; j < m; j++) {
			ep_null(_p[j]);
			ep_new(_p[j]);
#if EP_ADD == BASIC
			/* Fix: the original indexed p with the uninitialized counter i. */
			ep_neg(_p[j], p[j]);
#else
			/* Precompute (3 * x, -y) used by the projective line function. */
			fp_add(_p[j]->x, p[j]->x, p[j]->x);
			fp_add(_p[j]->x, _p[j]->x, p[j]->x);
			fp_neg(_p[j]->y, p[j]->y);
#endif
			ep2_copy(t[j], q[j]);
		}

		fp12_zero(l);

		/* Precomputing: first doubling step; accumulate the remaining
		 * pairings (the original skipped j >= 1 here, unlike the sparse
		 * variant pp_mil_sps_k12). */
		pp_dbl_k12(r, t[0], t[0], _p[0]);
		for (j = 1; j < m; j++) {
			pp_dbl_k12(l, t[j], t[j], _p[j]);
			fp12_mul_dxs(r, r, l);
		}
		if (bn_get_bit(a, bn_bits(a) - 2)) {
			for (j = 0; j < m; j++) {
				pp_add_k12(l, t[j], q[j], p[j]);
				fp12_mul_dxs(r, r, l);
			}
		}

		/* Standard double-and-add Miller loop over the remaining bits. */
		for (i = bn_bits(a) - 3; i >= 0; i--) {
			fp12_sqr(r, r);
			for (j = 0; j < m; j++) {
				pp_dbl_k12(l, t[j], t[j], _p[j]);
				fp12_mul_dxs(r, r, l);
				if (bn_get_bit(a, i)) {
					pp_add_k12(l, t[j], q[j], p[j]);
					fp12_mul_dxs(r, r, l);
				}
			}
		}
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp12_free(l);
		for (j = 0; j < m; j++) {
			ep_free(_p[j]);
		}
	}
}
/**
 * Uncompresses a prime elliptic curve point: recomputes y from the curve
 * equation and selects the root whose parity matches the compressed bit.
 *
 * @param[out] r	- the uncompressed point (normalized, z = 1).
 * @param[in] p		- the compressed point (full x, sign bit in y).
 * @return 1 if the right-hand side is a square, 0 otherwise.
 */
int ep_upk(ep_t r, const ep_t p) {
	int result = 0;
	fp_t t;

	fp_null(t);

	TRY {
		fp_new(t);

		/* Evaluate the right-hand side of the curve equation at x1. */
		ep_rhs(t, p);

		/* Try to extract y1 = sqrt(x1^3 + a * x1 + b). */
		result = fp_srt(t, t);
		if (result) {
			/* Flip the sign of the root when its parity disagrees with the
			 * stored compression bit. */
			int bit = fp_get_bit(p->y, 0);
			if (bit != fp_get_bit(t, 0)) {
				fp_neg(t, t);
			}
			fp_copy(r->y, t);
			fp_copy(r->x, p->x);
			fp_set_dig(r->z, 1);
			r->norm = 1;
		}
	}
	CATCH_ANY {
		THROW(ERR_CAUGHT);
	}
	FINALLY {
		fp_free(t);
	}
	return result;
}
/**
 * LibTomCrypt math-descriptor hook: negates a multiple-precision integer.
 * Presumably writes the negation of a into b per fp_neg's argument order
 * -- TODO confirm against the tomsfastmath fp_neg contract.
 *
 * @param[in] a		- the operand (fp_int).
 * @param[out] b	- the destination (fp_int).
 * @return CRYPT_OK on success; LTC_ARGCHK rejects NULL arguments.
 */
static int neg(void *a, void *b)
{
	LTC_ARGCHK(a != NULL);
	LTC_ARGCHK(b != NULL);
	fp_neg(((fp_int*)a), ((fp_int*)b));
	return CRYPT_OK;
}
/**
 * Subtracts two prime field elements stored as fixed-size limb vectors,
 * c = a - b mod p, with a conditional add-back of the prime on borrow.
 * The per-element "flag" appears to mark whether the value is nonzero
 * (flag == 0 is treated as the zero element) -- TODO confirm against the
 * companion fp_set/fp_neg implementations.
 */
static void fp_sub(element_ptr c, element_ptr a, element_ptr b) {
	eptr ad = (eptr)a->data, bd = (eptr)b->data;

	if (!ad->flag) {
		/* 0 - b = -b. */
		fp_neg(c, b);
	} else if (!bd->flag) {
		/* a - 0 = a. */
		fp_set(c, a);
	} else {
		fptr p = (fptr)c->field->data;
		size_t t = p->limbs;
		eptr cd = (eptr)c->data;
		int i = mpn_cmp(ad->d, bd->d, t);
		if (i == 0) {
			/* Equal operands: the difference is zero. */
			cd->flag = 0;
		} else {
			cd->flag = 2;
			mpn_sub_n(cd->d, ad->d, bd->d, t);
			if (i < 0) {
				/* a < b: the subtraction wrapped, add the prime back. */
				mpn_add_n(cd->d, cd->d, p->primelimbs, t);
			}
		}
	}
}
/**
 * Multiplies a prime field element by a signed machine integer:
 * e = (a * |op|) mod p, negated afterwards when op < 0.
 *
 * NOTE(review): _alloca is non-standard (MSVC-style); confirm the
 * portability layer maps it to alloca or equivalent on other toolchains.
 */
static void fp_mul_si(element_ptr e, element_ptr a, signed long int op) {
	fp_field_data_ptr p = e->field->data;
	size_t t = p->limbs;
	/* t + 1 limbs hold the full product of a t-limb value by one limb. */
	mp_limb_t *tmp = _alloca((t + 1) * sizeof(mp_limb_t));
	/* Quotient of a (t+1)-limb value by a t-limb divisor fits in 2 limbs. */
	mp_limb_t qp[2];

	tmp[t] = mpn_mul_1(tmp, a->data, t, labs(op));
	/* Reduce modulo the prime; the remainder lands directly in e. */
	mpn_tdiv_qr(qp, e->data, 0, tmp, t + 1, p->primelimbs, t);
	if (op < 0) {
		fp_neg(e, e);
	}
}
/**
 * Inverts a quadratic extension field element using the field norm:
 * 1/(a_0 + a_1 * u) = (a_0 - a_1 * u) / (a_0^2 - qnr * a_1^2).
 *
 * @param[out] c	- the inverse.
 * @param[in] a		- the element to invert.
 */
void fp2_inv(fp2_t c, fp2_t a) {
	fp_t t0, t1;

	fp_null(t0);
	fp_null(t1);

	TRY {
		fp_new(t0);
		fp_new(t1);

		/* t0 = a_0^2, t1 = a_1^2. */
		fp_sqr(t0, a[0]);
		fp_sqr(t1, a[1]);

		/* t1 = 1/(a_0^2 + a_1^2). */
#ifndef FP_QNRES
		if (fp_prime_get_qnr() != -1) {
			if (fp_prime_get_qnr() == -2) {
				/* qnr = -2: norm = a_0^2 + 2 * a_1^2, computed via doubling. */
				fp_dbl(t1, t1);
				fp_add(t0, t0, t1);
			} else {
				if (fp_prime_get_qnr() < 0) {
					fp_mul_dig(t1, t1, -fp_prime_get_qnr());
					fp_add(t0, t0, t1);
				} else {
					fp_mul_dig(t1, t1, fp_prime_get_qnr());
					fp_sub(t0, t0, t1);
				}
			}
		} else {
			fp_add(t0, t0, t1);
		}
#else
		fp_add(t0, t0, t1);
#endif
		fp_inv(t1, t0);

		/* c_0 = a_0/(a_0^2 + a_1^2). */
		fp_mul(c[0], a[0], t1);
		/* c_1 = - a_1/(a_0^2 + a_1^2). */
		fp_mul(c[1], a[1], t1);
		fp_neg(c[1], c[1]);
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp_free(t0);
		fp_free(t1);
	}
}
/**
 * Negates a prime elliptic curve point in projective coordinates.
 *
 * @param[out] r	- the result.
 * @param[in] p		- the point to negate.
 */
void ep_neg_projc(ep_t r, const ep_t p) {
	/* The negative of the point at infinity is itself. */
	if (ep_is_infty(p)) {
		ep_set_infty(r);
		return;
	}

	/* Copy the untouched coordinates only when source and destination
	 * differ. */
	if (r != p) {
		fp_copy(r->z, p->z);
		fp_copy(r->x, p->x);
	}

	/* Negation only flips the sign of the y-coordinate. */
	fp_neg(r->y, p->y);
	r->norm = p->norm;
}
/**
 * Applies the i-th power of the Frobenius endomorphism to a point over the
 * quadratic extension (affine variant: r is normalized with z = 1 at the
 * end). The twisting isomorphism constants differ between M-type and
 * D-type twists, hence the per-case fp2_mul_frb/fp2_mul_art sequences.
 *
 * @param[out] r	- the result.
 * @param[in] p		- the point to map.
 * @param[in] i		- the power of the Frobenius (0 to 3 handled).
 */
void ep2_frb(ep2_t r, ep2_t p, int i) {
	switch (i) {
		case 0:
			ep2_copy(r, p);
			break;
		case 1:
			fp2_frb(r->x, p->x, 1);
			fp2_frb(r->y, p->y, 1);
			if (ep2_curve_is_twist() == EP_MTYPE) {
				fp2_mul_frb(r->x, r->x, 1, 4);
				fp2_mul_art(r->x, r->x);
				fp2_mul_art(r->y, r->y);
			} else {
				fp2_mul_frb(r->x, r->x, 1, 2);
			}
			fp2_mul_frb(r->y, r->y, 1, 3);
			break;
		case 2:
			if (ep2_curve_is_twist() == EP_MTYPE) {
				fp2_mul_frb(r->x, p->x, 2, 4);
			} else {
				fp2_mul_frb(r->x, p->x, 2, 2);
			}
			/* p^2 acts on y as plain negation. */
			fp2_neg(r->y, p->y);
			break;
		case 3:
			if (ep2_curve_is_twist() == EP_MTYPE) {
				fp2_frb(r->x, p->x, 1);
				fp2_frb(r->y, p->y, 1);
				fp2_mul_frb(r->x, r->x, 1, 4);
				fp2_mul_frb(r->x, r->x, 2, 4);
				fp2_mul_art(r->x, r->x);
				fp2_mul_frb(r->y, r->y, 1, 3);
				fp2_mul_art(r->y, r->y);
				fp2_neg(r->y, r->y);
			} else {
				fp2_frb(r->x, p->x, 1);
				fp2_mul_frb(r->x, r->x, 3, 2);
				/* Conjugate y before the constant multiplication. */
				fp_neg(r->y[0], p->y[0]);
				fp_copy(r->y[1], p->y[1]);
				fp2_mul_frb(r->y, r->y, 1, 3);
			}
			break;
	}
	/* The result is left in affine (normalized) form. */
	r->norm = 1;
	fp_set_dig(r->z[0], 1);
	fp_zero(r->z[1]);
}
/**
 * Inverts a unitary fp18 element by conjugation: a coefficient is negated
 * exactly when the sum of its three indices is odd, and copied otherwise.
 *
 * @param[out] c	- the inverse.
 * @param[in] a		- the unitary element to invert.
 */
void fp18_inv_uni(fp18_t c, fp18_t a) {
	for (int i = 0; i < 3; i++) {
		for (int j = 0; j < 3; j++) {
			for (int k = 0; k < 2; k++) {
				/* Conjugation flips the sign of the odd-parity cells. */
				if ((i + j + k) % 2 == 1) {
					fp_neg(c[i][j][k], a[i][j][k]);
				} else {
					fp_copy(c[i][j][k], a[i][j][k]);
				}
			}
		}
	}
}
/**
 * Applies the i-th power of the Frobenius endomorphism to a point over the
 * quadratic extension (projective variant: starts from a full copy of p and
 * maps x, y and z in place; z is left untouched for i = 2). The constants
 * differ between M-type and D-type twists.
 *
 * @param[out] r	- the result.
 * @param[in] p		- the point to map.
 * @param[in] i		- the power of the Frobenius (1 to 3 handled; other
 *					  values leave r = p).
 */
void ep2_frb(ep2_t r, ep2_t p, int i) {
	ep2_copy(r, p);
	switch (i) {
		case 1:
			fp2_frb(r->x, r->x, 1);
			fp2_frb(r->y, r->y, 1);
			fp2_frb(r->z, r->z, 1);
			if (ep2_curve_is_twist() == EP_MTYPE) {
				fp2_mul_frb(r->x, r->x, 1, 4);
				fp2_mul_art(r->x, r->x);
				fp2_mul_art(r->y, r->y);
			} else {
				fp2_mul_frb(r->x, r->x, 1, 2);
			}
			fp2_mul_frb(r->y, r->y, 1, 3);
			break;
		case 2:
			if (ep2_curve_is_twist() == EP_MTYPE) {
				fp2_mul_frb(r->x, r->x, 2, 4);
			} else {
				fp2_mul_frb(r->x, r->x, 2, 2);
			}
			/* p^2 acts on y as plain negation. */
			fp2_neg(r->y, r->y);
			break;
		case 3:
			if (ep2_curve_is_twist() == EP_MTYPE) {
				fp2_frb(r->x, r->x, 1);
				fp2_frb(r->y, r->y, 1);
				fp2_frb(r->z, r->z, 1);
				fp2_mul_frb(r->x, r->x, 1, 4);
				fp2_mul_frb(r->x, r->x, 2, 4);
				fp2_mul_art(r->x, r->x);
				fp2_mul_frb(r->y, r->y, 1, 3);
				fp2_mul_art(r->y, r->y);
				fp2_neg(r->y, r->y);
			} else {
				fp2_frb(r->x, r->x, 1);
				fp2_mul_frb(r->x, r->x, 3, 2);
				/* Conjugate y before the constant multiplication. */
				fp_neg(r->y[0], r->y[0]);
				fp_copy(r->y[1], r->y[1]);
				fp2_mul_frb(r->y, r->y, 1, 3);
			}
			break;
	}
}
/**
 * Smoke test for the soft-float support routines: exercises single- and
 * double-precision arithmetic, conversions, and comparison primitives with
 * small constant operands, recording failures via fail(); aborts when any
 * check failed.
 */
int main() {
	/* Single-precision arithmetic. */
	if (fp_add (1, 1) != 2) fail ("fp_add 1+1");
	if (fp_sub (3, 2) != 1) fail ("fp_sub 3-2");
	if (fp_mul (2, 3) != 6) fail ("fp_mul 2*3");
	if (fp_div (3, 2) != 1.5) fail ("fp_div 3/2");
	if (fp_neg (1) != -1) fail ("fp_neg 1");

	/* Double-precision arithmetic. */
	if (dp_add (1, 1) != 2) fail ("dp_add 1+1");
	if (dp_sub (3, 2) != 1) fail ("dp_sub 3-2");
	if (dp_mul (2, 3) != 6) fail ("dp_mul 2*3");
	if (dp_div (3, 2) != 1.5) fail ("dp_div 3/2");
	if (dp_neg (1) != -1) fail ("dp_neg 1");

	/* Width and integer conversions. */
	if (fp_to_dp (1.5) != 1.5) fail ("fp_to_dp 1.5");
	if (dp_to_fp (1.5) != 1.5) fail ("dp_to_fp 1.5");
	if (floatsisf (1) != 1) fail ("floatsisf 1");
	if (floatsidf (1) != 1) fail ("floatsidf 1");
	if (fixsfsi (1.42) != 1) fail ("fixsfsi 1.42");
	if (fixunssfsi (1.42) != 1) fail ("fixunssfsi 1.42");
	if (fixdfsi (1.42) != 1) fail ("fixdfsi 1.42");
	if (fixunsdfsi (1.42) != 1) fail ("fixunsdfsi 1.42");

	/* Single-precision comparisons. */
	if (eqsf2 (1, 1) == 0) fail ("eqsf2 1==1");
	if (eqsf2 (1, 2) != 0) fail ("eqsf2 1==2");
	if (nesf2 (1, 2) == 0) fail ("nesf2 1!=1");
	if (nesf2 (1, 1) != 0) fail ("nesf2 1!=1");
	if (gtsf2 (2, 1) == 0) fail ("gtsf2 2>1");
	if (gtsf2 (1, 1) != 0) fail ("gtsf2 1>1");
	if (gtsf2 (0, 1) != 0) fail ("gtsf2 0>1");
	if (gesf2 (2, 1) == 0) fail ("gesf2 2>=1");
	if (gesf2 (1, 1) == 0) fail ("gesf2 1>=1");
	if (gesf2 (0, 1) != 0) fail ("gesf2 0>=1");
	if (ltsf2 (1, 2) == 0) fail ("ltsf2 1<2");
	if (ltsf2 (1, 1) != 0) fail ("ltsf2 1<1");
	if (ltsf2 (1, 0) != 0) fail ("ltsf2 1<0");
	if (lesf2 (1, 2) == 0) fail ("lesf2 1<=2");
	if (lesf2 (1, 1) == 0) fail ("lesf2 1<=1");
	if (lesf2 (1, 0) != 0) fail ("lesf2 1<=0");

	if (fail_count != 0)
		abort ();
	exit (0);
}
/**
 * Detects an optimization based on the curve coefficients.
 *
 * @param[out] opt		- the resulting optimization identifier.
 * @param[in] a			- the curve coefficient.
 */
static void detect_opt(int *opt, fp_t a) {
	fp_t t;

	fp_null(t);

	TRY {
		fp_new(t);
		/* t = -3 mod p, the classic fast-doubling coefficient. */
		fp_prime_conv_dig(t, 3);
		fp_neg(t, t);

		/* Flattened else-if chain; the original also set t to 1 before the
		 * digit comparisons, but that value was never read (dead store). */
		if (fp_cmp(a, t) == CMP_EQ) {
			*opt = OPT_MINUS3;
		} else if (fp_is_zero(a)) {
			*opt = OPT_ZERO;
		} else if (fp_cmp_dig(a, 1) == CMP_EQ) {
			*opt = OPT_ONE;
		} else if (fp_cmp_dig(a, 2) == CMP_EQ) {
			*opt = OPT_TWO;
		} else if (fp_bits(a) <= FP_DIGIT) {
			/* Coefficient fits in a single digit. */
			*opt = OPT_DIGIT;
		} else {
			*opt = RELIC_OPT_NONE;
		}
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp_free(t);
	}
}
/**
 * Multiplies a cubic extension field element by the adjoined root u, for
 * u^3 = -2: the coefficients rotate and the wrapped one picks up the
 * non-residue factor.
 *
 * @param[out] c	- the result.
 * @param[in] a		- the element to multiply.
 */
void fp3_mul_art(fp3_t c, fp3_t a) {
	fp_t a0;

	fp_null(a0);

	TRY {
		fp_new(a0);

		/* (a_0 + a_1 * u + a_1 * u^2) * u = a_0 * u + a_1 * u^2 + a_1 * u^3.
		 * Buffer a_0 first so the rotation is safe when c aliases a. */
		fp_copy(a0, a[0]);
		fp_dbl(c[0], a[2]);
		fp_neg(c[0], c[0]);
		fp_copy(c[2], a[1]);
		fp_copy(c[1], a0);
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp_free(a0);
	}
}
/**
 * Doubles the point p (writing the result to r in projective coordinates)
 * and evaluates the associated line function at q, for pairings with
 * embedding degree 12. The line coefficients are stored in sparse form in
 * l; on an M-type twist the sparse slots swap (one/zero toggled).
 *
 * @param[out] l	- the evaluated line function.
 * @param[out] r	- the doubled point.
 * @param[in] p		- the point to double.
 * @param[in] q		- the evaluation point on the twist.
 */
void pp_dbl_lit_k12(fp12_t l, ep_t r, ep_t p, ep2_t q) {
	fp_t t0, t1, t2, t3, t4, t5, t6;
	int one = 1, zero = 0;

	fp_null(t0);
	fp_null(t1);
	fp_null(t2);
	fp_null(t3);
	fp_null(t4);
	fp_null(t5);
	fp_null(t6);

	TRY {
		fp_new(t0);
		fp_new(t1);
		fp_new(t2);
		fp_new(t3);
		fp_new(t4);
		fp_new(t5);
		fp_new(t6);

		/* t0 = x^2, t1 = y^2, t2 = z^2. */
		fp_sqr(t0, p->x);
		fp_sqr(t1, p->y);
		fp_sqr(t2, p->z);

		/* t3 = 3 * b * z^2. */
		fp_mul(t4, ep_curve_get_b(), t2);
		fp_dbl(t3, t4);
		fp_add(t3, t3, t4);

		/* t4 = (x + y)^2 - x^2 - y^2 = 2xy. */
		fp_add(t4, p->x, p->y);
		fp_sqr(t4, t4);
		fp_sub(t4, t4, t0);
		fp_sub(t4, t4, t1);

		/* t5 = (y + z)^2 - y^2 - z^2 = 2yz. */
		fp_add(t5, p->y, p->z);
		fp_sqr(t5, t5);
		fp_sub(t5, t5, t1);
		fp_sub(t5, t5, t2);

		/* r = 2p in projective coordinates. */
		fp_dbl(t6, t3);
		fp_add(t6, t6, t3);
		fp_sub(r->x, t1, t6);
		fp_mul(r->x, r->x, t4);
		fp_add(r->y, t1, t6);
		fp_sqr(r->y, r->y);
		fp_sqr(t4, t3);
		fp_dbl(t6, t4);
		fp_add(t6, t6, t4);
		fp_dbl(t6, t6);
		fp_dbl(t6, t6);
		fp_sub(r->y, r->y, t6);
		fp_mul(r->z, t1, t5);
		fp_dbl(r->z, r->z);
		fp_dbl(r->z, r->z);
		r->norm = 0;

		/* Swap the sparse slots for an M-type twist. */
		if (ep2_curve_is_twist() == EP_MTYPE) {
			one ^= 1;
			zero ^= 1;
		}

		/* Line coefficients: 3 * x^2 * x_q, (3bz^2 - y^2), and -2yz * y_q. */
		fp2_dbl(l[zero][one], q->x);
		fp2_add(l[zero][one], l[zero][one], q->x);
		fp_mul(l[zero][one][0], l[zero][one][0], t0);
		fp_mul(l[zero][one][1], l[zero][one][1], t0);
		fp_sub(l[zero][zero][0], t3, t1);
		fp_zero(l[zero][zero][1]);
		fp_neg(t5, t5);
		fp_mul(l[one][one][0], q->y[0], t5);
		fp_mul(l[one][one][1], q->y[1], t5);
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp_free(t0);
		fp_free(t1);
		fp_free(t2);
		fp_free(t3);
		fp_free(t4);
		fp_free(t5);
		fp_free(t6);
	}
}
/**
 * Inverts a unitary quadratic extension field element. For a unitary
 * element the inverse is simply the conjugate: negate the imaginary
 * coefficient, keep the real one.
 *
 * @param[out] c	- the inverse.
 * @param[in] a		- the unitary element to invert.
 */
void fp2_inv_uni(fp2_t c, fp2_t a) {
	fp_neg(c[1], a[1]);
	fp_copy(c[0], a[0]);
}
/**
 * Compute the Miller loop for pairings of type G_2 x G_1 over the bits of a
 * given parameter represented in sparse form.
 *
 * @param[out] r		- the result.
 * @param[out] t		- the resulting points.
 * @param[in] q			- the vector of first arguments in affine coordinates.
 * @param[in] p			- the vector of second arguments in affine coordinates.
 * @param[in] m			- the number of pairings to evaluate.
 * @param[in] s			- the loop parameter in sparse (signed digit) form.
 * @param[in] len		- the length of the loop parameter.
 */
static void pp_mil_sps_k12(fp12_t r, ep2_t *t, ep2_t *q, ep_t *p, int m, int *s,
		int len) {
	fp12_t l;
	ep_t _p[m];
	ep2_t _q[m];
	int i, j;

	if (m == 0) {
		return;
	}

	fp12_null(l);

	TRY {
		fp12_new(l);
		fp12_zero(l);

		for (j = 0; j < m; j++) {
			ep_null(_p[j]);
			ep2_null(_q[j]);
			ep_new(_p[j]);
			ep2_new(_q[j]);
			ep2_copy(t[j], q[j]);
			/* Negatives of q are precomputed to serve the -1 digits. */
			ep2_neg(_q[j], q[j]);
#if EP_ADD == BASIC
			ep_neg(_p[j], p[j]);
#else
			/* Precompute (3 * x, -y) used by the projective line function. */
			fp_add(_p[j]->x, p[j]->x, p[j]->x);
			fp_add(_p[j]->x, _p[j]->x, p[j]->x);
			fp_neg(_p[j]->y, p[j]->y);
#endif
		}

		/* First doubling step, accumulating all pairings into r. */
		pp_dbl_k12(r, t[0], t[0], _p[0]);
		for (j = 1; j < m; j++) {
			pp_dbl_k12(l, t[j], t[j], _p[j]);
			fp12_mul_dxs(r, r, l);
		}
		if (s[len - 2] > 0) {
			for (j = 0; j < m; j++) {
				pp_add_k12(l, t[j], q[j], p[j]);
				fp12_mul_dxs(r, r, l);
			}
		}
		if (s[len - 2] < 0) {
			for (j = 0; j < m; j++) {
				pp_add_k12(l, t[j], _q[j], p[j]);
				fp12_mul_dxs(r, r, l);
			}
		}

		/* Double-and-add/subtract over the remaining sparse digits. */
		for (i = len - 3; i >= 0; i--) {
			fp12_sqr(r, r);
			for (j = 0; j < m; j++) {
				pp_dbl_k12(l, t[j], t[j], _p[j]);
				fp12_mul_dxs(r, r, l);
				if (s[i] > 0) {
					pp_add_k12(l, t[j], q[j], p[j]);
					fp12_mul_dxs(r, r, l);
				}
				if (s[i] < 0) {
					pp_add_k12(l, t[j], _q[j], p[j]);
					fp12_mul_dxs(r, r, l);
				}
			}
		}
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp12_free(l);
		for (j = 0; j < m; j++) {
			ep_free(_p[j]);
			ep2_free(_q[j]);
		}
	}
}
/**
 * Multiplies a cubic extension field element by the cubic non-residue u
 * (the same rotation performed by fp3_mul_art, with the wrapped coefficient
 * doubled and negated).
 *
 * @param[out] c	- the result; may alias a.
 * @param[in] a		- the element to multiply.
 */
void fp3_mul_nor(fp3_t c, fp3_t a) {
	fp_t t;

	fp_null(t);

	TRY {
		fp_new(t);

		/* Buffer a_0 before writing any output: the original wrote c[1]
		 * (overwriting a[1]) before reading a[1], which corrupted the
		 * result whenever c aliased a. This ordering mirrors the
		 * alias-safe fp3_mul_art. */
		fp_copy(t, a[0]);
		fp_dbl(c[0], a[2]);
		fp_neg(c[0], c[0]);
		fp_copy(c[2], a[1]);
		fp_copy(c[1], t);
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp_free(t);
	}
}
/**
 * Computes the per-context constants required for evaluating Frobenius maps
 * on cubic extension field elements: the cube-root-of-unity bases
 * (fp3_base) and the powers fp3_p through fp3_p5 derived from
 * t0 = u^((p-1)/6). The repeated "for (i = -1; i > cnr; i--)" loops
 * multiply a product by the (negative) cubic non-residue via repeated
 * subtraction, as elsewhere in this file.
 */
static void fp3_calc() {
	bn_t e;
	fp3_t t0, t1, t2;
	ctx_t *ctx = core_get();

	bn_null(e);
	fp3_null(t0);
	fp3_null(t1);
	fp3_null(t2);

	TRY {
		bn_new(e);
		fp3_new(t0);
		fp3_new(t1);
		fp3_new(t2);

		/* fp3_base[0] = cnr^((p-1)/3), fp3_base[1] its square. */
		fp_set_dig(ctx->fp3_base[0], -fp_prime_get_cnr());
		fp_neg(ctx->fp3_base[0], ctx->fp3_base[0]);
		e->used = FP_DIGS;
		dv_copy(e->dp, fp_prime_get(), FP_DIGS);
		bn_sub_dig(e, e, 1);
		bn_div_dig(e, e, 3);
		fp_exp(ctx->fp3_base[0], ctx->fp3_base[0], e);
		fp_sqr(ctx->fp3_base[1], ctx->fp3_base[0]);

		fp3_zero(t0);
		fp_set_dig(t0[1], 1);
		dv_copy(e->dp, fp_prime_get(), FP_DIGS);
		bn_sub_dig(e, e, 1);
		bn_div_dig(e, e, 6);
		/* t0 = u^((p-1)/6). */
		fp3_exp(t0, t0, e);
		fp_copy(ctx->fp3_p[0], t0[2]);
		fp3_sqr(t1, t0);
		fp_copy(ctx->fp3_p[1], t1[1]);
		fp3_mul(t2, t1, t0);
		fp_copy(ctx->fp3_p[2], t2[0]);
		fp3_sqr(t2, t1);
		fp_copy(ctx->fp3_p[3], t2[2]);
		fp3_mul(t2, t2, t0);
		fp_copy(ctx->fp3_p[4], t2[1]);

		/* Second-power constants fp3_p2[i]. */
		fp_mul(ctx->fp3_p2[0], ctx->fp3_p[0], ctx->fp3_base[1]);
		fp_mul(t0[0], ctx->fp3_p2[0], ctx->fp3_p[0]);
		fp_neg(ctx->fp3_p2[0], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p2[0], ctx->fp3_p2[0], t0[0]);
		}
		fp_mul(ctx->fp3_p2[1], ctx->fp3_p[1], ctx->fp3_base[0]);
		fp_mul(ctx->fp3_p2[1], ctx->fp3_p2[1], ctx->fp3_p[1]);
		fp_sqr(ctx->fp3_p2[2], ctx->fp3_p[2]);
		fp_mul(ctx->fp3_p2[3], ctx->fp3_p[3], ctx->fp3_base[1]);
		fp_mul(t0[0], ctx->fp3_p2[3], ctx->fp3_p[3]);
		fp_neg(ctx->fp3_p2[3], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p2[3], ctx->fp3_p2[3], t0[0]);
		}
		fp_mul(ctx->fp3_p2[4], ctx->fp3_p[4], ctx->fp3_base[0]);
		fp_mul(ctx->fp3_p2[4], ctx->fp3_p2[4], ctx->fp3_p[4]);

		/* Third-power constants fp3_p3[i]. */
		fp_mul(ctx->fp3_p3[0], ctx->fp3_p[0], ctx->fp3_base[0]);
		fp_mul(t0[0], ctx->fp3_p3[0], ctx->fp3_p2[0]);
		fp_neg(ctx->fp3_p3[0], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p3[0], ctx->fp3_p3[0], t0[0]);
		}
		fp_mul(ctx->fp3_p3[1], ctx->fp3_p[1], ctx->fp3_base[1]);
		fp_mul(t0[0], ctx->fp3_p3[1], ctx->fp3_p2[1]);
		fp_neg(ctx->fp3_p3[1], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p3[1], ctx->fp3_p3[1], t0[0]);
		}
		fp_mul(ctx->fp3_p3[2], ctx->fp3_p[2], ctx->fp3_p2[2]);
		fp_mul(ctx->fp3_p3[3], ctx->fp3_p[3], ctx->fp3_base[0]);
		fp_mul(t0[0], ctx->fp3_p3[3], ctx->fp3_p2[3]);
		fp_neg(ctx->fp3_p3[3], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p3[3], ctx->fp3_p3[3], t0[0]);
		}
		fp_mul(ctx->fp3_p3[4], ctx->fp3_p[4], ctx->fp3_base[1]);
		fp_mul(t0[0], ctx->fp3_p3[4], ctx->fp3_p2[4]);
		fp_neg(ctx->fp3_p3[4], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p3[4], ctx->fp3_p3[4], t0[0]);
		}

		/* Fourth- and fifth-power constants from the previous tables. */
		for (int i = 0; i < 5; i++) {
			fp_mul(ctx->fp3_p4[i], ctx->fp3_p[i], ctx->fp3_p3[i]);
			fp_mul(ctx->fp3_p5[i], ctx->fp3_p2[i], ctx->fp3_p3[i]);
		}
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		bn_free(e);
		fp3_free(t0);
		fp3_free(t1);
		fp3_free(t2);
	}
}
/**
 * Negates a quadratic extension field element coefficient-wise.
 *
 * @param[out] c	- the negation.
 * @param[in] a		- the element to negate.
 */
void fp2_neg(fp2_t c, fp2_t a) {
	for (int i = 0; i < 2; i++) {
		fp_neg(c[i], a[i]);
	}
}
/**
 * Negates a cubic extension field element coefficient-wise.
 *
 * @param[out] c	- the negation.
 * @param[in] a		- the element to negate.
 */
void fp3_neg(fp3_t c, fp3_t a) {
	for (int i = 0; i < 3; i++) {
		fp_neg(c[i], a[i]);
	}
}
/**
 * Inverts a cubic extension field element via the adjugate/norm method:
 * the three cofactors v0, v1, v2 are combined with 1/norm. The repeated
 * "for (i = -1; i > cnr; i--)" loops multiply by the (negative) cubic
 * non-residue B through repeated subtraction.
 *
 * @param[out] c	- the inverse.
 * @param[in] a		- the element to invert.
 */
void fp3_inv(fp3_t c, fp3_t a) {
	fp_t v0;
	fp_t v1;
	fp_t v2;
	fp_t t0;

	fp_null(v0);
	fp_null(v1);
	fp_null(v2);
	fp_null(t0);

	TRY {
		fp_new(v0);
		fp_new(v1);
		fp_new(v2);
		fp_new(t0);

		/* v0 = a_0^2 - B * a_1 * a_2. */
		fp_sqr(t0, a[0]);
		fp_mul(v0, a[1], a[2]);
		fp_neg(v2, v0);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(v2, v2, v0);
		}
		fp_sub(v0, t0, v2);

		/* v1 = B * a_2^2 - a_0 * a_1. */
		fp_sqr(t0, a[2]);
		fp_neg(v2, t0);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(v2, v2, t0);
		}
		fp_mul(v1, a[0], a[1]);
		fp_sub(v1, v2, v1);

		/* v2 = a_1^2 - a_0 * a_2. */
		fp_sqr(t0, a[1]);
		fp_mul(v2, a[0], a[2]);
		fp_sub(v2, t0, v2);

		/* c holds the three norm contributions: B*a_1*v2, a_0*v0, B*a_2*v1. */
		fp_mul(t0, a[1], v2);
		fp_neg(c[1], t0);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(c[1], c[1], t0);
		}
		fp_mul(c[0], a[0], v0);
		fp_mul(t0, a[2], v1);
		fp_neg(c[2], t0);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(c[2], c[2], t0);
		}

		/* t0 = 1/norm; scale the cofactors to finish the inversion. */
		fp_add(t0, c[0], c[1]);
		fp_add(t0, t0, c[2]);
		fp_inv(t0, t0);
		fp_mul(c[0], v0, t0);
		fp_mul(c[1], v1, t0);
		fp_mul(c[2], v2, t0);
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp_free(v0);
		fp_free(v1);
		fp_free(v2);
		fp_free(t0);
	}
}