/**
 * Uncompresses a twisted Edwards curve point.
 */
int ed_upk(ed_t r, const ed_t p) {
    int result = 1;
    fp_t t;

    fp_null(t);

    TRY {
        fp_new(t);

        fp_copy(r->y, p->y);
        ed_recover_x(t, p->y, core_get()->ed_d, core_get()->ed_a);

        if (fp_get_bit(t, 0) != fp_get_bit(p->x, 0)) {
            fp_neg(t, t);
        }
        fp_copy(r->x, t);

#if ED_ADD == EXTND
        fp_mul(r->t, r->x, r->y);
#endif

        fp_set_dig(r->z, 1);
        r->norm = 1;
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp_free(t);
    }
    return result;
}
void fp2_mul_art(fp2_t c, fp2_t a) {
    fp_t t;

    fp_null(t);

    TRY {
        fp_new(t);

#ifdef FP_QNRES
        /* (a_0 + a_1 * i) * i = -a_1 + a_0 * i. */
        fp_copy(t, a[0]);
        fp_neg(c[0], a[1]);
        fp_copy(c[1], t);
#else
        /* (a_0 + a_1 * u) * u = (a_1 * u^2) + a_0 * u. */
        fp_copy(t, a[0]);
        fp_neg(c[0], a[1]);
        for (int i = -1; i > fp_prime_get_qnr(); i--) {
            fp_sub(c[0], c[0], a[1]);
        }
        fp_copy(c[1], t);
#endif
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp_free(t);
    }
}
int ep_upk(ep_t r, const ep_t p) {
    fp_t t;
    int result = 0;

    fp_null(t);

    TRY {
        fp_new(t);

        ep_rhs(t, p);

        /* t0 = sqrt(x1^3 + a * x1 + b). */
        result = fp_srt(t, t);

        if (result) {
            /* Verify if least significant bit of the result matches the
             * compressed y-coordinate. */
            if (fp_get_bit(t, 0) != fp_get_bit(p->y, 0)) {
                fp_neg(t, t);
            }
            fp_copy(r->x, p->x);
            fp_copy(r->y, t);
            fp_set_dig(r->z, 1);
            r->norm = 1;
        }
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp_free(t);
    }
    return result;
}
int fp2_upk(fp2_t c, fp2_t a) {
    int result, b = fp_get_bit(a[1], 0);
    fp_t t;

    fp_null(t);

    TRY {
        fp_new(t);

        /* a_0^2 + a_1^2 = 1, thus a_1^2 = 1 - a_0^2. */
        fp_sqr(t, a[0]);
        fp_sub_dig(t, t, 1);
        fp_neg(t, t);

        /* a_1 = sqrt(a_1^2) = sqrt(1 - a_0^2). */
        result = fp_srt(t, t);

        if (result) {
            /* Verify if least significant bit of the result matches the
             * compressed second coordinate. */
            if (fp_get_bit(t, 0) != b) {
                fp_neg(t, t);
            }
            fp_copy(c[0], a[0]);
            fp_copy(c[1], t);
        }
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp_free(t);
    }
    return result;
}
void ep_curve_set_endom(const fp_t b, const ep_t g, const bn_t r, const bn_t h,
        const fp_t beta, const bn_t l) {
    int bits = bn_bits(r);
    ctx_t *ctx = core_get();

    ctx->ep_is_endom = 1;
    ctx->ep_is_super = 0;

    fp_zero(ctx->ep_a);
    fp_copy(ctx->ep_b, b);

    detect_opt(&(ctx->ep_opt_a), ctx->ep_a);
    detect_opt(&(ctx->ep_opt_b), ctx->ep_b);

#if EP_MUL == LWNAF || EP_FIX == COMBS || EP_FIX == LWNAF || EP_SIM == INTER || !defined(STRIP)
    fp_copy(ctx->beta, beta);
    bn_gcd_ext_mid(&(ctx->ep_v1[1]), &(ctx->ep_v1[2]), &(ctx->ep_v2[1]),
            &(ctx->ep_v2[2]), l, r);
    /* l = v1[1] * v2[2] - v1[2] * v2[1], r = l / 2. */
    bn_mul(&(ctx->ep_v1[0]), &(ctx->ep_v1[1]), &(ctx->ep_v2[2]));
    bn_mul(&(ctx->ep_v2[0]), &(ctx->ep_v1[2]), &(ctx->ep_v2[1]));
    bn_sub(&(ctx->ep_r), &(ctx->ep_v1[0]), &(ctx->ep_v2[0]));
    bn_hlv(&(ctx->ep_r), &(ctx->ep_r));
    /* v1[0] = round(v2[2] * 2^|n| / l). */
    bn_lsh(&(ctx->ep_v1[0]), &(ctx->ep_v2[2]), bits + 1);
    if (bn_sign(&(ctx->ep_v1[0])) == BN_POS) {
        bn_add(&(ctx->ep_v1[0]), &(ctx->ep_v1[0]), &(ctx->ep_r));
    } else {
        bn_sub(&(ctx->ep_v1[0]), &(ctx->ep_v1[0]), &(ctx->ep_r));
    }
    bn_dbl(&(ctx->ep_r), &(ctx->ep_r));
    bn_div(&(ctx->ep_v1[0]), &(ctx->ep_v1[0]), &(ctx->ep_r));
    if (bn_sign(&ctx->ep_v1[0]) == BN_NEG) {
        bn_add_dig(&(ctx->ep_v1[0]), &(ctx->ep_v1[0]), 1);
    }
    /* v2[0] = round(v1[2] * 2^|n| / l). */
    bn_lsh(&(ctx->ep_v2[0]), &(ctx->ep_v1[2]), bits + 1);
    if (bn_sign(&(ctx->ep_v2[0])) == BN_POS) {
        bn_add(&(ctx->ep_v2[0]), &(ctx->ep_v2[0]), &(ctx->ep_r));
    } else {
        bn_sub(&(ctx->ep_v2[0]), &(ctx->ep_v2[0]), &(ctx->ep_r));
    }
    bn_div(&(ctx->ep_v2[0]), &(ctx->ep_v2[0]), &(ctx->ep_r));
    if (bn_sign(&ctx->ep_v2[0]) == BN_NEG) {
        bn_add_dig(&(ctx->ep_v2[0]), &(ctx->ep_v2[0]), 1);
    }
    bn_neg(&(ctx->ep_v2[0]), &(ctx->ep_v2[0]));
#endif

    ep_norm(&(ctx->ep_g), g);
    bn_copy(&(ctx->ep_r), r);
    bn_copy(&(ctx->ep_h), h);

#if defined(EP_PRECO)
    ep_mul_pre((ep_t *)ep_curve_get_tab(), &(ctx->ep_g));
#endif
}
void ed_neg(ed_t r, const ed_t p) {
#if ED_ADD == PROJC
    fp_neg(r->x, p->x);
    fp_copy(r->y, p->y);
    fp_copy(r->z, p->z);
#elif ED_ADD == EXTND
    fp_neg(r->x, p->x);
    fp_copy(r->y, p->y);
    fp_neg(r->t, p->t);
    fp_copy(r->z, p->z);
#endif
}
void ed_copy(ed_t r, const ed_t p) {
#if ED_ADD == PROJC || ED_ADD == EXTND
    fp_copy(r->x, p->x);
    fp_copy(r->y, p->y);
    fp_copy(r->z, p->z);
#endif
#if ED_ADD == EXTND
    fp_copy(r->t, p->t);
#endif
    r->norm = p->norm;
}
void ep_neg_projc(ep_t r, const ep_t p) {
    if (ep_is_infty(p)) {
        ep_set_infty(r);
        return;
    }

    if (r != p) {
        fp_copy(r->x, p->x);
        fp_copy(r->z, p->z);
    }

    fp_neg(r->y, p->y);
    r->norm = p->norm;
}
void fp_invn_low(dig_t *c, const dig_t *a) {
    mp_size_t cn;
    align dig_t s[FP_DIGS], t[2 * FP_DIGS], u[FP_DIGS + 1];

#if FP_RDC == MONTY
    dv_zero(t + FP_DIGS, FP_DIGS);
    dv_copy(t, a, FP_DIGS);
    fp_rdcn_low(u, t);
#else
    fp_copy(u, a);
#endif

    dv_copy(s, fp_prime_get(), FP_DIGS);

    mpn_gcdext(t, c, &cn, u, FP_DIGS, s, FP_DIGS);

    if (cn < 0) {
        dv_zero(c - cn, FP_DIGS + cn);
        mpn_sub_n(c, fp_prime_get(), c, FP_DIGS);
    } else {
        dv_zero(c + cn, FP_DIGS - cn);
    }

#if FP_RDC == MONTY
    dv_zero(t, FP_DIGS);
    dv_copy(t + FP_DIGS, c, FP_DIGS);
    mpn_tdiv_qr(u, c, 0, t, 2 * FP_DIGS, fp_prime_get(), FP_DIGS);
#endif
}
void pp_dbl_k12_basic(fp12_t l, ep2_t r, ep2_t q, ep_t p) {
    fp2_t s;
    ep2_t t;
    int one = 1, zero = 0;

    fp2_null(s);
    ep2_null(t);

    TRY {
        fp2_new(s);
        ep2_new(t);
        ep2_copy(t, q);
        ep2_dbl_slp_basic(r, s, q);
        fp12_zero(l);

        if (ep2_curve_is_twist() == EP_MTYPE) {
            one ^= 1;
            zero ^= 1;
        }

        fp_mul(l[one][zero][0], s[0], p->x);
        fp_mul(l[one][zero][1], s[1], p->x);

        fp2_mul(l[one][one], s, t->x);
        fp2_sub(l[one][one], t->y, l[one][one]);

        fp_copy(l[zero][zero][0], p->y);
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp2_free(s);
        ep2_free(t);
    }
}
void ed_norm_sim(ed_t *r, const ed_t *t, int n) {
    int i;
    fp_t a[n];

    for (i = 0; i < n; i++) {
        fp_null(a[i]);
    }

    TRY {
        for (i = 0; i < n; i++) {
            fp_new(a[i]);
            fp_copy(a[i], t[i]->z);
        }

        fp_inv_sim(a, (const fp_t *)a, n);

        for (i = 0; i < n; i++) {
            fp_mul(r[i]->x, t[i]->x, a[i]);
            fp_mul(r[i]->y, t[i]->y, a[i]);
#if ED_ADD == EXTND
            fp_mul(r[i]->t, t[i]->t, a[i]);
#endif
            fp_set_dig(r[i]->z, 1);
        }
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        for (i = 0; i < n; i++) {
            fp_free(a[i]);
        }
    }
}
void fp_rdc_monty_basic(fp_t c, dv_t a) {
    int i;
    dig_t r, c0, c1, *tmp, u0;
    const dig_t *p = NULL;

    tmp = a;
    u0 = *(fp_prime_get_rdc());
    p = fp_prime_get();

    c1 = 0;
    for (i = 0; i < FP_DIGS; i++, tmp++) {
        r = (dig_t)(*tmp * u0);
        c0 = fp_mula_low(tmp, fp_prime_get(), r);
        /* We must use this because the size (FP_DIGS - i) is variable. */
        c1 += bn_add1_low(tmp + FP_DIGS, tmp + FP_DIGS, c0, FP_DIGS - i);
    }
    fp_copy(c, a + FP_DIGS);

    for (i = 0; i < c1; i++) {
        fp_subn_low(c, c, p);
    }
    if (fp_cmpn_low(c, p) != CMP_LT) {
        fp_subn_low(c, c, p);
    }
}
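/*
 * For reference, a minimal single-word sketch of the same Montgomery
 * reduction idea in plain C (not the RELIC fp_t API): u0 plays the role of
 * -p^{-1} mod 2^64 above, and the final conditional subtraction mirrors the
 * fp_subn_low() calls. Assumes t < p * 2^64 and p < 2^63 so the 128-bit sum
 * cannot overflow; the helper name is illustrative only.
 */
#include <stdint.h>

static uint64_t redc64(unsigned __int128 t, uint64_t p, uint64_t u0) {
    /* m = t * (-p^{-1}) mod 2^64, so that t + m * p is divisible by 2^64. */
    uint64_t m = (uint64_t)t * u0;
    unsigned __int128 s = t + (unsigned __int128)m * p;
    /* Divide by 2^64 and reduce once if needed; result is t * 2^-64 mod p. */
    uint64_t r = (uint64_t)(s >> 64);
    return (r >= p) ? r - p : r;
}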
void fp2_norm_low(fp2_t c, fp2_t a) {
    fp2_t t;
    bn_t b;

    fp2_null(t);
    bn_null(b);

    TRY {
        fp2_new(t);
        bn_new(b);

#if FP_PRIME == 158
        fp_dbl(t[0], a[0]);
        fp_dbl(t[0], t[0]);
        fp_sub(t[0], t[0], a[1]);
        fp_dbl(t[1], a[1]);
        fp_dbl(t[1], t[1]);
        fp_add(c[1], a[0], t[1]);
        fp_copy(c[0], t[0]);
#elif defined(FP_QNRES)
        /* If p = 3 mod 8, (1 + i) is a QNR/CNR. */
        fp_neg(t[0], a[1]);
        fp_add(c[1], a[0], a[1]);
        fp_add(c[0], t[0], a[0]);
#else
        switch (fp_prime_get_mod8()) {
            case 3:
                /* If p = 3 mod 8, (1 + u) is a QNR/CNR. */
                fp_neg(t[0], a[1]);
                fp_add(c[1], a[0], a[1]);
                fp_add(c[0], t[0], a[0]);
                break;
            case 5:
                /* If p = 5 mod 8, (u) is a QNR/CNR. */
                fp2_mul_art(c, a);
                break;
            case 7:
                /* If p = 7 mod 8, we choose (2^(lg_4(b-1)) + u) as QNR/CNR. */
                fp2_mul_art(t, a);
                fp2_dbl(c, a);
                fp_prime_back(b, ep_curve_get_b());
                for (int i = 1; i < bn_bits(b) / 2; i++) {
                    fp2_dbl(c, c);
                }
                fp2_add(c, c, t);
                break;
            default:
                THROW(ERR_NO_VALID);
                break;
        }
#endif
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp2_free(t);
        bn_free(b);
    }
}
/**
 * Normalizes a point represented in projective coordinates.
 *
 * @param r - the result.
 * @param p - the point to normalize.
 */
static void ep_norm_imp(ep_t r, const ep_t p, int inverted) {
    if (!p->norm) {
        fp_t t0, t1;

        fp_null(t0);
        fp_null(t1);

        TRY {
            fp_new(t0);
            fp_new(t1);

            if (inverted) {
                fp_copy(t1, p->z);
            } else {
                fp_inv(t1, p->z);
            }
            fp_sqr(t0, t1);
            fp_mul(r->x, p->x, t0);
            fp_mul(t0, t0, t1);
            fp_mul(r->y, p->y, t0);
            fp_set_dig(r->z, 1);
        }
        CATCH_ANY {
            THROW(ERR_CAUGHT);
        }
        FINALLY {
            fp_free(t0);
            fp_free(t1);
        }
    }

    r->norm = 1;
}
int fp2_srt(fp2_t c, fp2_t a) {
    int r = 0;
    fp_t t1;
    fp_t t2;
    fp_t t3;

    fp_null(t1);
    fp_null(t2);
    fp_null(t3);

    TRY {
        fp_new(t1);
        fp_new(t2);
        fp_new(t3);

        /* t1 = a[0]^2 - u^2 * a[1]^2. */
        fp_sqr(t1, a[0]);
        fp_sqr(t2, a[1]);
        for (int i = -1; i > fp_prime_get_qnr(); i--) {
            fp_add(t1, t1, t2);
        }
        for (int i = 0; i <= fp_prime_get_qnr(); i++) {
            fp_sub(t1, t1, t2);
        }
        fp_add(t1, t1, t2);

        if (fp_srt(t2, t1)) {
            /* t1 = (a_0 + sqrt(t1)) / 2. */
            fp_add(t1, a[0], t2);
            fp_set_dig(t3, 2);
            fp_inv(t3, t3);
            fp_mul(t1, t1, t3);

            if (!fp_srt(t3, t1)) {
                /* t1 = (a_0 - sqrt(t1)) / 2. */
                fp_sub(t1, a[0], t2);
                fp_set_dig(t3, 2);
                fp_inv(t3, t3);
                fp_mul(t1, t1, t3);
                fp_srt(t3, t1);
            }
            /* c_0 = sqrt(t1). */
            fp_copy(c[0], t3);
            /* c_1 = a_1 / (2 * sqrt(t1)). */
            fp_dbl(t3, t3);
            fp_inv(t3, t3);
            fp_mul(c[1], a[1], t3);
            r = 1;
        }
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp_free(t1);
        fp_free(t2);
        fp_free(t3);
    }
    return r;
}
void fp_lsh(fp_t c, const fp_t a, int bits) {
    int digits;

    SPLIT(bits, digits, bits, FP_DIG_LOG);

    if (digits) {
        fp_lshd_low(c, a, digits);
    } else {
        if (c != a) {
            fp_copy(c, a);
        }
    }

    switch (bits) {
        case 0:
            break;
        case 1:
            fp_lsh1_low(c, c);
            break;
        default:
            fp_lshb_low(c, c, bits);
            break;
    }
}
/* c = a mod 2**b */
void fp_mod_2d(fp_int *a, int b, fp_int *c) {
    int x;

    /* the result is zero if the bit count is less than or equal to zero */
    if (b <= 0) {
        fp_zero(c);
        return;
    }

    /* get a copy of the input */
    fp_copy(a, c);

    /* if 2**b is larger than the input, we are done */
    if (b >= (DIGIT_BIT * a->used)) {
        return;
    }

    /* zero digits above the last digit of the modulus */
    for (x = (b / DIGIT_BIT) + ((b % DIGIT_BIT) == 0 ? 0 : 1); x < c->used; x++) {
        c->dp[x] = 0;
    }

    /* clear the digit that is not completely outside/inside the modulus */
    if ((b % DIGIT_BIT) != 0) {
        c->dp[b / DIGIT_BIT] &= ~((fp_digit)0) >> (DIGIT_BIT - (b % DIGIT_BIT));
    }
    fp_clamp(c);
}
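/*
 * For a single machine word the reduction above collapses to a bit mask; a
 * tiny plain-C illustration (hypothetical helper, not part of the library):
 */
#include <stdint.h>

static uint64_t mod_2d_u64(uint64_t x, int d) {
    /* x mod 2^d for 0 < d < 64: keep only the low d bits. */
    return x & (((uint64_t)1 << d) - 1);
}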
static int copy(void *a, void *b) {
    LTC_ARGCHK(a != NULL);
    LTC_ARGCHK(b != NULL);
    fp_copy(a, b);
    return CRYPT_OK;
}
void ep_rhs(fp_t rhs, const ep_t p) {
    fp_t t0;
    fp_t t1;

    fp_null(t0);
    fp_null(t1);

    TRY {
        fp_new(t0);
        fp_new(t1);

        /* t0 = x1^2. */
        fp_sqr(t0, p->x);
        /* t1 = x1^3. */
        fp_mul(t1, t0, p->x);

        /* t1 = x1^3 + a * x1 + b. */
        switch (ep_curve_opt_a()) {
            case OPT_ZERO:
                break;
            case OPT_ONE:
                fp_add(t1, t1, p->x);
                break;
#if FP_RDC != MONTY
            case OPT_DIGIT:
                fp_mul_dig(t0, p->x, ep_curve_get_a()[0]);
                fp_add(t1, t1, t0);
                break;
#endif
            default:
                fp_mul(t0, p->x, ep_curve_get_a());
                fp_add(t1, t1, t0);
                break;
        }

        switch (ep_curve_opt_b()) {
            case OPT_ZERO:
                break;
            case OPT_ONE:
                fp_add_dig(t1, t1, 1);
                break;
#if FP_RDC != MONTY
            case OPT_DIGIT:
                fp_add_dig(t1, t1, ep_curve_get_b()[0]);
                break;
#endif
            default:
                fp_add(t1, t1, ep_curve_get_b());
                break;
        }

        fp_copy(rhs, t1);
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp_free(t0);
        fp_free(t1);
    }
}
void fp_rdc_basic(fp_t c, dv_t a) {
    dv_t t0, t1, t2, t3;

    dv_null(t0);
    dv_null(t1);
    dv_null(t2);
    dv_null(t3);

    TRY {
        dv_new(t0);
        dv_new(t1);
        dv_new(t2);
        dv_new(t3);

        dv_copy(t2, a, 2 * FP_DIGS);
        dv_copy(t3, fp_prime_get(), FP_DIGS);
        bn_divn_low(t0, t1, t2, 2 * FP_DIGS, t3, FP_DIGS);
        fp_copy(c, t1);
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        dv_free(t0);
        dv_free(t1);
        dv_free(t2);
        dv_free(t3);
    }
}
/**
 * Compresses a twisted Edwards curve point by reducing the x-coordinate to its
 * sign bit.
 */
void ed_pck(ed_t r, const ed_t p) {
    int b = fp_get_bit(p->x, 0);

    fp_copy(r->y, p->y);
    fp_zero(r->x);
    fp_set_bit(r->x, 0, b);
    fp_set_dig(r->z, 1);
    r->norm = 1;
}
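/*
 * Sketch of a compress/decompress round trip with ed_pck() and ed_upk() above.
 * This assumes the usual RELIC setup (core_init() and a parameter call such as
 * ed_param_set_any()) has already selected a twisted Edwards curve, and that
 * ed_rand(), ed_cmp() and CMP_EQ from the same RELIC version are available.
 */
#include <assert.h>

static void ed_pck_round_trip(void) {
    ed_t p, c, q;

    ed_null(p);
    ed_null(c);
    ed_null(q);

    ed_new(p);
    ed_new(c);
    ed_new(q);

    ed_rand(p);                 /* random point on the curve */
    ed_pck(c, p);               /* keep y, reduce x to its sign bit */
    if (ed_upk(q, c)) {         /* recover x from y via ed_recover_x() */
        assert(ed_cmp(p, q) == CMP_EQ);
    }

    ed_free(p);
    ed_free(c);
    ed_free(q);
}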
void fp2_pck(fp2_t c, fp2_t a) {
    int b = fp_get_bit(a[1], 0);

    if (fp2_test_uni(a)) {
        fp_copy(c[0], a[0]);
        fp_zero(c[1]);
        fp_set_bit(c[1], 0, b);
    } else {
        fp2_copy(c, a);
    }
}
/**
 * Computes the Miller loop for pairings of type G_1 x G_2 over the bits of a
 * given parameter.
 *
 * @param[out] r - the result.
 * @param[out] t - the resulting point.
 * @param[in] p - the first pairing argument in affine coordinates.
 * @param[in] q - the second pairing argument in affine coordinates.
 * @param[in] m - the number of point pairs.
 * @param[in] a - the loop parameter.
 */
static void pp_mil_lit_k2(fp2_t r, ep_t *t, ep_t *p, ep_t *q, int m, bn_t a) {
    fp2_t l, _l;
    ep_t _q[m];
    int i, j;

    fp2_null(l);
    fp2_null(_l);

    TRY {
        fp2_new(l);
        fp2_new(_l);
        for (j = 0; j < m; j++) {
            ep_null(_q[j]);
            ep_new(_q[j]);
            ep_copy(t[j], p[j]);
            ep_neg(_q[j], q[j]);
        }

        for (i = bn_bits(a) - 2; i >= 0; i--) {
            fp2_sqr(r, r);
            for (j = 0; j < m; j++) {
                pp_dbl_k2(l, t[j], t[j], _q[j]);
                fp_copy(_l[0], l[1]);
                fp_copy(_l[1], l[0]);
                fp2_mul(r, r, _l);
                if (bn_get_bit(a, i)) {
                    pp_add_k2(l, t[j], p[j], q[j]);
                    fp_copy(_l[0], l[1]);
                    fp_copy(_l[1], l[0]);
                    fp2_mul(r, r, _l);
                }
            }
        }
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp2_free(l);
        fp2_free(_l);
        for (j = 0; j < m; j++) {
            ep_free(_q[j]);
        }
    }
}
void ep_curve_set_super(const fp_t a, const fp_t b, const ep_t g, const bn_t r,
        const bn_t h) {
    ctx_t *ctx = core_get();

    ctx->ep_is_endom = 0;
    ctx->ep_is_super = 1;

    fp_copy(ctx->ep_a, a);
    fp_copy(ctx->ep_b, b);

    detect_opt(&(ctx->ep_opt_a), ctx->ep_a);
    detect_opt(&(ctx->ep_opt_b), ctx->ep_b);

    ep_norm(&(ctx->ep_g), g);
    bn_copy(&(ctx->ep_r), r);
    bn_copy(&(ctx->ep_h), h);

#if defined(EP_PRECO)
    ep_mul_pre((ep_t *)ep_curve_get_tab(), &(ctx->ep_g));
#endif
}
/* generic PxQ multiplier */
void fp_mul_comba(fp_int *A, fp_int *B, fp_int *C) {
    int ix, iy, iz, tx, ty, pa;
    fp_digit c0, c1, c2, *tmpx, *tmpy;
    fp_int tmp, *dst;

    COMBA_START;
    COMBA_CLEAR;

    /* get size of output and trim */
    pa = A->used + B->used;
    if (pa >= FP_SIZE) {
        pa = FP_SIZE - 1;
    }

    if (A == C || B == C) {
        fp_zero(&tmp);
        dst = &tmp;
    } else {
        fp_zero(C);
        dst = C;
    }

    for (ix = 0; ix < pa; ix++) {
        /* get offsets into the two bignums */
        ty = MIN(ix, B->used - 1);
        tx = ix - ty;

        /* setup temp aliases */
        tmpx = A->dp + tx;
        tmpy = B->dp + ty;

        /* this is the number of times the loop will iterate, essentially
           while (tx++ < a->used && ty-- >= 0) { ... } */
        iy = MIN(A->used - tx, ty + 1);

        /* execute loop */
        COMBA_FORWARD;
        for (iz = 0; iz < iy; ++iz) {
            MULADD(*tmpx++, *tmpy--);
        }

        /* store term */
        COMBA_STORE(dst->dp[ix]);
    }
    COMBA_FINI;

    dst->used = pa;
    dst->sign = A->sign ^ B->sign;
    fp_clamp(dst);
    fp_copy(dst, C);
}
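/*
 * A minimal column-wise (Comba) multiplier in plain C, to make the COMBA_*
 * macro flow above concrete: each output digit is the sum of one
 * anti-diagonal of partial products, accumulated in a wider register before
 * the carry is propagated to the next column. Toy 4-digit operands with
 * 32-bit digits; illustrative only, not the tfm macros.
 */
#include <stdint.h>

#define TOY_DIGS 4

static void comba_mul_toy(uint32_t c[2 * TOY_DIGS], const uint32_t a[TOY_DIGS],
        const uint32_t b[TOY_DIGS]) {
    uint64_t acc = 0;   /* running column sum */
    uint64_t carry = 0; /* number of 64-bit wraparounds in this column */

    for (int ix = 0; ix < 2 * TOY_DIGS - 1; ix++) {
        /* offsets into the two operands for column ix = tx + ty */
        int ty = (ix < TOY_DIGS) ? ix : TOY_DIGS - 1;
        int tx = ix - ty;
        int iy = (TOY_DIGS - tx) < (ty + 1) ? (TOY_DIGS - tx) : (ty + 1);

        for (int iz = 0; iz < iy; iz++) {
            uint64_t prod = (uint64_t)a[tx + iz] * b[ty - iz]; /* MULADD */
            acc += prod;
            carry += (acc < prod); /* detect 64-bit overflow */
        }
        c[ix] = (uint32_t)acc;                /* COMBA_STORE */
        acc = (acc >> 32) + (carry << 32);    /* COMBA_FORWARD */
        carry = 0;
    }
    c[2 * TOY_DIGS - 1] = (uint32_t)acc;
}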
void fp3_mul_art(fp3_t c, fp3_t a) {
    fp_t t;

    fp_null(t);

    TRY {
        fp_new(t);

        /* (a_0 + a_1 * u + a_2 * u^2) * u = a_2 * u^3 + a_0 * u + a_1 * u^2. */
        fp_copy(t, a[0]);
        fp_dbl(c[0], a[2]);
        fp_neg(c[0], c[0]);
        fp_copy(c[2], a[1]);
        fp_copy(c[1], t);
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp_free(t);
    }
}
void fp_hlvm_low(dig_t *c, const dig_t *a) {
    dig_t carry = 0;

    if (a[0] & 1) {
        carry = fp_addn_low(c, a, fp_prime_get());
    } else {
        fp_copy(c, a);
    }

    fp_rsh1_low(c, c);
    if (carry) {
        c[FP_DIGS - 1] ^= ((dig_t)1 << (FP_DIGIT - 1));
    }
}
void fp_exp_monty(fp_t c, const fp_t a, const bn_t b) {
    fp_t t[2];

    fp_null(t[0]);
    fp_null(t[1]);

    if (bn_is_zero(b)) {
        fp_set_dig(c, 1);
        return;
    }

    TRY {
        fp_new(t[0]);
        fp_new(t[1]);

        fp_set_dig(t[0], 1);
        fp_copy(t[1], a);

        for (int i = bn_bits(b) - 1; i >= 0; i--) {
            int j = bn_get_bit(b, i);
            dv_swap_cond(t[0], t[1], FP_DIGS, j ^ 1);
            fp_mul(t[0], t[0], t[1]);
            fp_sqr(t[1], t[1]);
            dv_swap_cond(t[0], t[1], FP_DIGS, j ^ 1);
        }

        if (bn_sign(b) == BN_NEG) {
            fp_inv(c, t[0]);
        } else {
            fp_copy(c, t[0]);
        }
    }
    CATCH_ANY {
        THROW(ERR_CAUGHT);
    }
    FINALLY {
        fp_free(t[1]);
        fp_free(t[0]);
    }
}
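/*
 * The loop above is a Montgomery ladder: one multiplication and one squaring
 * per exponent bit, regardless of the bit value, with conditional swaps
 * instead of branches. A plain-C sketch of the same ladder over a small
 * modulus (hypothetical helpers, using an explicit branch where the RELIC
 * code uses dv_swap_cond()):
 */
#include <stdint.h>

static uint64_t mulmod64(uint64_t x, uint64_t y, uint64_t p) {
    return (uint64_t)(((unsigned __int128)x * y) % p);
}

static uint64_t exp_ladder(uint64_t a, uint64_t e, uint64_t p) {
    uint64_t r0 = 1, r1 = a % p; /* invariant: r1 = r0 * a mod p */

    for (int i = 63; i >= 0; i--) {
        if ((e >> i) & 1) {
            r0 = mulmod64(r0, r1, p); /* r0 = r0 * r1 */
            r1 = mulmod64(r1, r1, p); /* r1 = r1^2 */
        } else {
            r1 = mulmod64(r0, r1, p); /* r1 = r0 * r1 */
            r0 = mulmod64(r0, r0, p); /* r0 = r0^2 */
        }
    }
    return r0; /* a^e mod p */
}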
/* c = gcd(a, b) */
void fp_gcd(fp_int *a, fp_int *b, fp_int *c) {
    fp_int u, v, r;

    /* if either input is zero, the gcd is the absolute value of the other */
    if (fp_iszero(a) == 1 && fp_iszero(b) == 0) {
        fp_abs(b, c);
        return;
    }
    if (fp_iszero(a) == 0 && fp_iszero(b) == 1) {
        fp_abs(a, c);
        return;
    }

    /* optimization: at this point, if a == 0 then b must be zero too */
    if (fp_iszero(a) == 1) {
        fp_zero(c);
        return;
    }

    /* sort inputs */
    if (fp_cmp_mag(a, b) != FP_LT) {
        fp_init_copy(&u, a);
        fp_init_copy(&v, b);
    } else {
        fp_init_copy(&u, b);
        fp_init_copy(&v, a);
    }

    fp_zero(&r);
    while (fp_iszero(&v) == FP_NO) {
        fp_mod(&u, &v, &r);
        fp_copy(&v, &u);
        fp_copy(&r, &v);
    }
    fp_copy(&u, c);
}
void fp2_mul_frb(fp2_t c, fp2_t a, int i, int j) {
    ctx_t *ctx = core_get();

    if (i == 2) {
        fp_mul(c[0], a[0], ctx->fp2_p2[j - 1]);
        fp_mul(c[1], a[1], ctx->fp2_p2[j - 1]);
    } else {
#if ALLOC == AUTO
        if (i == 1) {
            fp2_mul(c, a, ctx->fp2_p[j - 1]);
        } else {
            fp2_mul(c, a, ctx->fp2_p3[j - 1]);
        }
#else
        fp2_t t;

        fp2_null(t);

        TRY {
            fp2_new(t);
            if (i == 1) {
                fp_copy(t[0], ctx->fp2_p[j - 1][0]);
                fp_copy(t[1], ctx->fp2_p[j - 1][1]);
            } else {
                fp_copy(t[0], ctx->fp2_p3[j - 1][0]);
                fp_copy(t[1], ctx->fp2_p3[j - 1][1]);
            }
            fp2_mul(c, a, t);
        }
        CATCH_ANY {
            THROW(ERR_CAUGHT);
        }
        FINALLY {
            fp2_free(t);
        }
#endif
    }
}