/**
 * Inverts a prime field element using the extended GCD from GMP.
 *
 * @param[out] c	- the result, the inverse of a modulo the prime.
 * @param[in] a		- the field element to invert.
 */
void fp_invn_low(dig_t *c, const dig_t *a) {
	mp_size_t cn;
	align dig_t s[FP_DIGS], t[2 * FP_DIGS], u[FP_DIGS + 1];

#if FP_RDC == MONTY
	/* Take a out of Montgomery form before running the GCD. */
	dv_zero(t + FP_DIGS, FP_DIGS);
	dv_copy(t, a, FP_DIGS);
	fp_rdcn_low(u, t);
#else
	fp_copy(u, a);
#endif

	dv_copy(s, fp_prime_get(), FP_DIGS);

	/* Extended GCD: the Bezout coefficient of u lands in c, with signed
	 * used-length cn (negative cn means a negative coefficient). */
	mpn_gcdext(t, c, &cn, u, FP_DIGS, s, FP_DIGS);
	if (cn < 0) {
		/* Zero the limbs above the |cn| used ones, then negate mod p. */
		dv_zero(c - cn, FP_DIGS + cn);
		mpn_sub_n(c, fp_prime_get(), c, FP_DIGS);
	} else {
		dv_zero(c + cn, FP_DIGS - cn);
	}

#if FP_RDC == MONTY
	/* Re-enter Montgomery form: shift c up by FP_DIGS digits and take the
	 * remainder modulo p (c receives the remainder of the division). */
	dv_zero(t, FP_DIGS);
	dv_copy(t + FP_DIGS, c, FP_DIGS);
	mpn_tdiv_qr(u, c, 0, t, 2 * FP_DIGS, fp_prime_get(), FP_DIGS);
#endif
}
/**
 * Adds two double-precision values; if the sum overflows or its upper half
 * is not below the modulus, subtracts the modulus from the upper half once.
 */
void fp_addc_low(dig_t *c, const dig_t *a, const dig_t *b) {
	dig_t overflow = fp_addd_low(c, a, b);
	int reduce = (overflow != 0) ||
			(fp_cmpn_low(c + FP_DIGS, fp_prime_get()) != CMP_LT);

	if (reduce) {
		fp_subn_low(c + FP_DIGS, c + FP_DIGS, fp_prime_get());
	}
}
/**
 * Montgomery modular reduction, digit-by-digit variant.
 *
 * @param[out] c	- the reduced result.
 * @param[in] a		- the double-precision value to reduce (clobbered).
 */
void fp_rdc_monty_basic(fp_t c, dv_t a) {
	int i;
	dig_t r, c0, c1, *tmp, u0;
	const dig_t *p = NULL;

	tmp = a;
	/* u0 = precomputed reduction constant (Montgomery: -p^{-1} mod 2^W). */
	u0 = *(fp_prime_get_rdc());
	p = fp_prime_get();

	c1 = 0;
	for (i = 0; i < FP_DIGS; i++, tmp++) {
		/* r is chosen so that the current low digit becomes zero after
		 * adding r * p. */
		r = (dig_t)(*tmp * u0);
		c0 = fp_mula_low(tmp, fp_prime_get(), r);
		/* We must use this because the size (FP_DIGS - i) is variable. */
		c1 += bn_add1_low(tmp + FP_DIGS, tmp + FP_DIGS, c0, FP_DIGS - i);
	}
	/* The reduced value sits in the upper half of a. */
	fp_copy(c, a + FP_DIGS);

	/* Subtract p once per accumulated carry, then one final correction. */
	for (i = 0; i < c1; i++) {
		fp_subn_low(c, c, p);
	}
	if (fp_cmpn_low(c, p) != CMP_LT) {
		fp_subn_low(c, c, p);
	}
}
/**
 * Prints the prime modulus configured for the prime field.
 */
void fp_param_print(void) {
	util_banner("Prime modulus:", 0);
	util_print(" ");
#if ALLOC == AUTO
	fp_print(fp_prime_get());
#else
	/* For non-automatic allocators the raw digit pointer must be cast to
	 * the fp_t type that fp_print() expects. */
	fp_print((const fp_t)fp_prime_get());
#endif
}
/**
 * Samples a random field element: fill with random bytes, mask off the
 * excess top bits, then subtract the modulus until the value is below it.
 */
void fp_rand(fp_t a) {
	int bits, digits;
	dig_t top_mask;

	rand_bytes((uint8_t *)a, RLC_FP_DIGS * sizeof(dig_t));

	/* Split the field bit length into full digits and leftover bits. */
	RLC_RIP(bits, digits, RLC_FP_BITS);
	if (bits > 0) {
		top_mask = ((dig_t)1 << (dig_t)bits) - 1;
		a[RLC_FP_DIGS - 1] &= top_mask;
	}

	/* Bring the masked value below the modulus. */
	while (dv_cmp(a, fp_prime_get(), RLC_FP_DIGS) != RLC_LT) {
		fp_subn_low(a, a, fp_prime_get());
	}
}
/**
 * Multiplies two quadratic extension elements without reduction,
 * using Karatsuba over unreduced (double-precision) products.
 */
void fp2_mulc_low(dv2_t c, fp2_t a, fp2_t b) {
	align dig_t t0[2 * FP_DIGS], t1[2 * FP_DIGS], t2[2 * FP_DIGS];

	/* Karatsuba algorithm. */

	/* t0 = a_0 + a_1, t1 = b_0 + b_1. */
	fp_addn_low(t0, a[0], a[1]);
	fp_addn_low(t1, b[0], b[1]);

	/* c_0 = a_0 * b_0, c_1 = a_1 * b_1, t2 = (a_0 + a_1) * (b_0 + b_1). */
	fp_muln_low(c[0], a[0], b[0]);
	fp_muln_low(c[1], a[1], b[1]);
	fp_muln_low(t2, t0, t1);

	/* t0 = (a_0 * b_0) + (a_1 * b_1). */
	fp_addd_low(t0, c[0], c[1]);

	/* c_0 = (a_0 * b_0) + u^2 * (a_1 * b_1). */
	fp_subd_low(c[0], c[0], c[1]);
#ifndef FP_QNRES
	/* t1 = u^2 * (a_1 * b_1). */
	/* For |qnr| > 1, subtract the remaining copies of a_1 * b_1. */
	for (int i = -1; i > fp_prime_get_qnr(); i--) {
		fp_subd_low(c[0], c[0], c[1]);
	}
#endif
	/* c_1 = (t2 - t0). */
	fp_subd_low(c[1], t2, t0);

	/* c_0 = c_0 + 2^N * p/4. */
	/* NOTE(review): shift the upper FP_DIGS+1 digits left by 2, add p, and
	 * shift back — effectively adding 2^N * (p/4) to keep c_0 non-negative
	 * after the subtractions above. Confirm against the lazy-reduction
	 * bounds assumed by the callers. */
	bn_lshb_low(c[0] + FP_DIGS - 1, c[0] + FP_DIGS - 1, FP_DIGS + 1, 2);
	fp_addn_low(c[0] + FP_DIGS, c[0] + FP_DIGS, fp_prime_get());
	bn_rshb_low(c[0] + FP_DIGS - 1, c[0] + FP_DIGS - 1, FP_DIGS + 1, 2);
}
/**
 * Extracts a square root in a cubic extension field using an exponent of
 * the form (p - 5)/8 built below.
 *
 * @return 1 if the element is a quadratic residue (root written to c),
 * 0 otherwise.
 */
int fp3_srt(fp3_t c, fp3_t a) {
	int r = 0;
	fp3_t t0, t1, t2, t3;
	bn_t e;

	fp3_null(t0);
	fp3_null(t1);
	fp3_null(t2);
	fp3_null(t3);
	bn_null(e);

	TRY {
		fp3_new(t0);
		fp3_new(t1);
		fp3_new(t2);
		fp3_new(t3);
		bn_new(e);

		/* Combine Frobenius powers of 2a into t0 before the big
		 * exponentiation. NOTE(review): looks like an Atkin-style root
		 * for p = 5 mod 8 lifted to the cubic extension — confirm the
		 * parameter-set assumption. */
		fp3_dbl(t3, a);
		fp3_frb(t0, t3, 1);
		fp3_sqr(t1, t0);
		fp3_mul(t2, t1, t0);
		fp3_mul(t1, t1, t2);
		fp3_frb(t0, t0, 1);
		fp3_mul(t3, t3, t1);
		fp3_mul(t0, t0, t3);

		/* e = (p - 5)/8. */
		e->used = FP_DIGS;
		dv_copy(e->dp, fp_prime_get(), FP_DIGS);
		bn_sub_dig(e, e, 5);
		bn_div_dig(e, e, 8);
		fp3_exp(t0, t0, e);
		fp3_mul(t0, t0, t2);

		/* t1 = 2 * a * t0^2 - 1, c = (a * t0) * t1. */
		fp3_sqr(t1, t0);
		fp3_mul(t1, t1, a);
		fp3_dbl(t1, t1);
		fp3_mul(t0, t0, a);
		fp_sub_dig(t1[0], t1[0], 1);
		fp3_mul(c, t0, t1);

		/* Verify the candidate: c^2 == a iff a is a residue. */
		fp3_sqr(t0, c);
		if (fp3_cmp(t0, a) == CMP_EQ) {
			r = 1;
		}
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		fp3_free(t0);
		fp3_free(t1);
		fp3_free(t2);
		fp3_free(t3);
		bn_free(e);
	}
	return r;
}
/**
 * Reduces a double-precision value modulo the prime by plain long
 * division: the remainder of a / p is the result.
 */
void fp_rdc_basic(fp_t c, dv_t a) {
	dv_t quot, rem, num, mod;

	dv_null(quot);
	dv_null(rem);
	dv_null(num);
	dv_null(mod);

	TRY {
		dv_new(quot);
		dv_new(rem);
		dv_new(num);
		dv_new(mod);

		/* Work on copies: the division routine consumes its operands. */
		dv_copy(num, a, 2 * FP_DIGS);
		dv_copy(mod, fp_prime_get(), FP_DIGS);

		/* quot = num / mod, rem = num mod mod. */
		bn_divn_low(quot, rem, num, 2 * FP_DIGS, mod, FP_DIGS);

		fp_copy(c, rem);
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		dv_free(quot);
		dv_free(rem);
		dv_free(num);
		dv_free(mod);
	}
}
/**
 * Final exponentiation for embedding degree 2: map to the unitary
 * subgroup, then raise to (p + 1) / n with unitary exponentiation.
 */
void pp_exp_k2(fp2_t c, fp2_t a) {
	bn_t e, n;

	bn_null(n);
	bn_null(e);

	TRY {
		bn_new(n);
		bn_new(e);

		ep_curve_get_ord(n);

		/* Easy part: make the element unitary. */
		fp2_conv_uni(c, a);

		/* Hard part exponent: e = (p + 1) / n. */
		dv_copy(e->dp, fp_prime_get(), FP_DIGS);
		e->used = FP_DIGS;
		e->sign = BN_POS;
		bn_add_dig(e, e, 1);
		bn_div(e, e, n);

		fp2_exp_uni(c, c, e);
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		bn_free(n);
		bn_free(e);
	}
}
/**
 * Montgomery modular reduction built on GMP mpn primitives.
 *
 * @param[out] c	- the reduced result.
 * @param[in] a		- the double-precision value to reduce (clobbered).
 */
void fp_rdcn_low(dig_t *c, dig_t *a) {
	int i;
	dig_t r, c0, c1, u, *tmp;
	const dig_t *m;

	/* u = precomputed reduction constant (Montgomery: -p^{-1} mod 2^W). */
	u = *(fp_prime_get_rdc());
	m = fp_prime_get();
	tmp = a;

	c1 = 0;
	for (i = 0; i < FP_DIGS; i++, tmp++) {
		/* r zeroes the current least-significant digit after adding r*m. */
		r = (dig_t)(*tmp * u);
		c0 = mpn_addmul_1(tmp, m, FP_DIGS, r);
		/* Propagate the carry through the (shrinking) upper part. */
		c1 += mpn_add_1(tmp + FP_DIGS, tmp + FP_DIGS, FP_DIGS - i, c0);
	}
	/* The reduced value now occupies the upper half of a; copy it out. */
	for (i = 0; i < FP_DIGS; i++, tmp++) {
		c[i] = *tmp;
	}
	/* One subtraction per accumulated carry, plus a final correction. */
	for (i = 0; i < c1; i++) {
		fp_subn_low(c, c, m);
	}
	if (fp_cmp(c, m) != CMP_LT) {
		fp_subn_low(c, c, m);
	}
}
/**
 * Subtracts two double-precision values; on borrow, adds the modulus
 * back into the upper half.
 */
void fp_subc_low(dig_t *c, const dig_t *a, const dig_t *b) {
	dig_t borrow = fp_subd_low(c, a, b);

	if (borrow != 0) {
		fp_addn_low(c + FP_DIGS, c + FP_DIGS, fp_prime_get());
	}
}
/**
 * Doubles a field element with modular correction: c = (a + a) mod p.
 * Assumes a < p so a single conditional subtraction suffices — TODO
 * confirm the caller contract.
 */
void fp_dblm_low(dig_t *c, const dig_t *a) {
	int i;
	dig_t carry, c0, c1, r0, r1;

	carry = 0;
	for (i = 0; i < FP_DIGS; i++, a++) {
		/* Digit-wise doubling; c0 flags overflow of the doubling itself. */
		r0 = (*a) + (*a);
		c0 = (r0 < (*a));
		/* Add the incoming carry; c1 flags overflow of that addition. */
		r1 = r0 + carry;
		c1 = (r1 < r0);
		/* At most one of c0/c1 can be set, so OR combines them safely. */
		carry = c0 | c1;
		c[i] = r1;
	}
	if (carry || (fp_cmpn_low(c, fp_prime_get()) != CMP_LT)) {
		carry = fp_subn_low(c, c, fp_prime_get());
	}
}
/**
 * Modular subtraction: c = (a - b) mod p, adding p back on borrow.
 */
void fp_subm_low(dig_t *c, const dig_t *a, const dig_t *b) {
	if (fp_subn_low(c, a, b)) {
		/* Underflow: wrap the result back into [0, p). */
		fp_addn_low(c, c, fp_prime_get());
	}
}
/**
 * Multiplies a double-precision quadratic extension element by the
 * quadratic non-residue, selecting the formula by p mod 8.
 *
 * Fix: the original allocated a bn_t b (bn_null/bn_new/bn_free) that was
 * never used in this variant; the dead allocation has been removed.
 *
 * @param[out] c	- the result.
 * @param[in] a		- the element to multiply.
 */
void fp2_nord_low(dv2_t c, dv2_t a) {
	dv2_t t;

	dv2_null(t);

	TRY {
		dv2_new(t);
#ifdef FP_QNRES
		/* If p = 3 mod 8, (1 + i) is a QNR/CNR. */
		/* (a_0 + a_1 * i) * (1 + i) = (a_0 - a_1) + (a_0 + a_1) * u. */
		/* Copy a_1 first so the result is correct if c aliases a. */
		dv_copy(t[0], a[1], 2 * FP_DIGS);
		fp_addc_low(c[1], a[0], a[1]);
		fp_subc_low(c[0], a[0], t[0]);
#else
		switch (fp_prime_get_mod8()) {
			case 3:
				/* If p = 3 mod 8, (1 + u) is a QNR, u^2 = -1. */
				/* (a_0 + a_1 * u) * (1 + u) = (a_0 - a_1) + (a_0 + a_1) * u. */
				dv_copy(t[0], a[1], 2 * FP_DIGS);
				fp_addc_low(c[1], a[0], a[1]);
				fp_subc_low(c[0], a[0], t[0]);
				break;
			case 1:
			case 5:
				/* If p = 1,5 mod 8, (u) is a QNR. */
				/* Shift p into the upper half so the double-precision
				 * subtraction below cannot underflow. */
				dv_copy(t[0], a[0], 2 * FP_DIGS);
				dv_zero(t[1], FP_DIGS);
				dv_copy(t[1] + FP_DIGS, fp_prime_get(), FP_DIGS);
				fp_subc_low(c[0], t[1], a[1]);
				/* Extra subtractions handle |qnr| > 1. */
				for (int i = -1; i > fp_prime_get_qnr(); i--) {
					fp_subc_low(c[0], c[0], a[1]);
				}
				dv_copy(c[1], t[0], 2 * FP_DIGS);
				break;
			case 7:
				/* If p = 7 mod 8, (2 + u) is a QNR/CNR. */
				fp2_addc_low(t, a, a);
				fp_subc_low(c[0], t[0], a[1]);
				fp_addc_low(c[1], t[1], a[0]);
				break;
			default:
				THROW(ERR_NO_VALID);
				break;
		}
#endif
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		dv2_free(t);
	}
}
/**
 * Reduces a double-precision value modulo the prime via the low-level
 * reduction helper, followed by one conditional subtraction.
 *
 * Fix: removed the unused local tmpc and const-qualified the modulus
 * pointer, matching the sibling reduction variants.
 *
 * @param[out] c	- the reduced result.
 * @param[in] a		- the double-precision value to reduce.
 */
void fp_rdcn_low(dig_t *c, dig_t *a) {
	const dig_t *m = fp_prime_get();
	dig_t r1;

	/* Bulk of the reduction; the return value is used as an overflow
	 * flag here (presumably the final carry — verify fp_rdci_low). */
	r1 = fp_rdci_low(c, a);

	/* At most one subtraction brings the result below m. */
	if (r1 || fp_cmp(c, m) != CMP_LT) {
		fp_subn_low(c, c, m);
	}
}
/**
 * Halves a field element modulo p: c = a / 2 mod p.
 */
void fp_hlvm_low(dig_t *c, const dig_t *a) {
	dig_t top = 0;

	/* Odd values get p (odd) added first so the shift below is an exact
	 * division by two; even values are copied as-is. */
	if ((a[0] & 1) == 0) {
		fp_copy(c, a);
	} else {
		top = fp_addn_low(c, a, fp_prime_get());
	}

	fp_rsh1_low(c, c);

	/* Shift the carry of the addition back in as the most significant bit. */
	if (top) {
		c[FP_DIGS - 1] ^= ((dig_t)1 << (FP_DIGIT - 1));
	}
}
/**
 * Reduces a double-precision value modulo the prime via the low-level
 * reduction helper, followed by one conditional subtraction.
 *
 * Fix: removed the unused locals (i, j, r0, r2, u, v, tmp, tmpm, tmpc)
 * and the dead 2*FP_DIGS stack array t, and const-qualified the modulus
 * pointer.
 *
 * @param[out] c	- the reduced result.
 * @param[in] a		- the double-precision value to reduce.
 */
void fp_rdcn_low(dig_t *c, dig_t *a) {
	const dig_t *m = fp_prime_get();
	dig_t r1;

	/* Bulk of the reduction; the return value is used as an overflow
	 * flag here (presumably the final carry — verify fp_rdci_low). */
	r1 = fp_rdci_low(c, a);

	/* At most one subtraction brings the result below m. */
	if (r1 || fp_cmp(c, m) != CMP_LT) {
		fp_subn_low(c, c, m);
	}
}
/**
 * Subtracts two double-precision values digit by digit; on final borrow,
 * adds the modulus back into the upper half.
 */
void fp_subc_low(dig_t *c, const dig_t *a, const dig_t *b) {
	int i;
	dig_t carry, r0, diff;

	/* Zero the carry. */
	carry = 0;
	for (i = 0; i < 2 * FP_DIGS; i++, a++, b++) {
		/* Digit subtraction, then subtract the incoming borrow. */
		diff = (*a) - (*b);
		r0 = diff - carry;
		/* A new borrow occurs when *a < *b, or when diff == 0 and the
		 * incoming borrow had to be taken from it. */
		carry = ((*a) < (*b)) || (carry && !diff);
		c[i] = r0;
	}
	if (carry) {
		/* Correct the upper half modulo p. */
		fp_addn_low(c + FP_DIGS, c + FP_DIGS, fp_prime_get());
	}
}
/**
 * Inverts a field element by Fermat's little theorem: c = a^(p - 2) mod p.
 */
void fp_invn_low(dig_t *c, const dig_t *a) {
	bn_st e;

	/* Build the exponent e = p - 2. */
	bn_init(&e, RLC_FP_DIGS);
	dv_copy(e.dp, fp_prime_get(), RLC_FP_DIGS);
	e.used = RLC_FP_DIGS;
	bn_sub1_low(e.dp, e.dp, 2, RLC_FP_DIGS);

#if AUTO == ALLOC
	fp_exp(c, a, &e);
#else
	/* Non-automatic allocators need the raw pointer cast to fp_t. */
	fp_exp(c, (const fp_t)a, &e);
#endif

	bn_clean(&e);
}
/**
 * Halves a double-precision value modulo p: c = a / 2.
 */
void fp_hlvd_low(dig_t *c, const dig_t *a) {
	dig_t carry = 0;

	/* If a is odd, add p (odd) so the low bit clears and the shift below
	 * is an exact division by two. */
	if (a[0] & 1) {
		carry = fp_addn_low(c, a, fp_prime_get());
	} else {
		dv_copy(c, a, FP_DIGS);
	}
	/* Propagate the addition carry into the upper half. */
	fp_add1_low(c + FP_DIGS, a + FP_DIGS, carry);
	/* Shift the upper half; capture the bit shifted out of it. */
	carry = fp_rsh1_low(c + FP_DIGS, c + FP_DIGS);
	fp_rsh1_low(c, c);
	/* Re-insert that bit as the top bit of the lower half. */
	if (carry) {
		c[FP_DIGS - 1] ^= ((dig_t)1 << (FP_DIGIT - 1));
	}
}
/**
 * Montgomery modular reduction using a Comba (column/product-scanning)
 * strategy; (r2, r1, r0) is the three-digit column accumulator.
 */
void fp_rdcn_low2(dig_t *c, dig_t *a) {
	int i, j;
	dig_t r0, r1, r2, u;
	dig_t *m, *tmp, *tmpm, *tmpc;

	/* u = precomputed reduction constant (Montgomery: -p^{-1} mod 2^W). */
	u = *(fp_prime_get_rdc());
	m = fp_prime_get();
	tmpc = c;

	r0 = r1 = r2 = 0;
	/* First pass: derive the quotient digits into c while accumulating
	 * the column sums of c * m plus a. */
	for (i = 0; i < FP_DIGS; i++, tmpc++, a++) {
		tmp = c;
		tmpm = m + i;
		for (j = 0; j < i; j++, tmp++, tmpm--) {
			COMBA_STEP(r2, r1, r0, *tmp, *tmpm);
		}
		COMBA_ADD(r2, r1, r0, *a);
		/* Quotient digit chosen so this column becomes zero. */
		*tmpc = (dig_t)(r0 * u);
		COMBA_STEP(r2, r1, r0, *tmpc, *m);
		/* Shift the accumulator down one digit. */
		r0 = r1;
		r1 = r2;
		r2 = 0;
	}
	/* Second pass: finish the upper columns; results overwrite c. */
	for (i = FP_DIGS; i < 2 * FP_DIGS - 1; i++, a++) {
		tmp = c + (i - FP_DIGS + 1);
		tmpm = m + FP_DIGS - 1;
		for (j = i - FP_DIGS + 1; j < FP_DIGS; j++, tmp++, tmpm--) {
			COMBA_STEP(r2, r1, r0, *tmp, *tmpm);
		}
		COMBA_ADD(r2, r1, r0, *a);
		c[i - FP_DIGS] = r0;
		r0 = r1;
		r1 = r2;
		r2 = 0;
	}
	COMBA_ADD(r2, r1, r0, *a);
	c[FP_DIGS - 1] = r0;

	/* Final conditional subtraction keeps the result below m. */
	if (r1 || fp_cmpn_low(c, m) != CMP_LT) {
		fp_subn_low(c, c, m);
	}
}
/**
 * Prints a field element as space-separated digits, most significant
 * first, converting out of Montgomery form when needed.
 */
void fp_print(const fp_t a) {
	bn_t t;

	bn_null(t);

	TRY {
		bn_new(t);

#if FP_RDC == MONTY
		/* The stored modulus is not in Montgomery form, so it is read
		 * raw; everything else is converted back first. */
		if (a == fp_prime_get()) {
			bn_read_raw(t, a, RLC_FP_DIGS);
		} else {
			fp_prime_back(t, a);
		}
#else
		bn_read_raw(t, a, RLC_FP_DIGS);
#endif

		/* Print digits above the used count as zeros; the least
		 * significant digit is always printed from dp[0]. */
		for (int k = RLC_FP_DIGS - 1; k > 0; k--) {
			util_print_dig(k < t->used ? t->dp[k] : 0, 1);
			util_print(" ");
		}
		util_print_dig(t->dp[0], 1);
		util_print("\n");
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		bn_free(t);
	}
}
/**
 * Extracts the square root of a prime field element: a direct exponent
 * for p = 3, 7 mod 8 and Tonelli-Shanks otherwise.
 *
 * @return 1 if a is a quadratic residue (root written to c), 0 otherwise.
 */
int fp_srt(fp_t c, const fp_t a) {
	bn_t e;
	fp_t t0;
	fp_t t1;
	int r = 0;

	bn_null(e);
	fp_null(t0);
	fp_null(t1);

	TRY {
		bn_new(e);
		fp_new(t0);
		fp_new(t1);

		/* Make e = p. */
		e->used = FP_DIGS;
		dv_copy(e->dp, fp_prime_get(), FP_DIGS);

		if (fp_prime_get_mod8() == 3 || fp_prime_get_mod8() == 7) {
			/* Easy case, compute a^((p + 1)/4). */
			bn_add_dig(e, e, 1);
			bn_rsh(e, e, 2);

			fp_exp(t0, a, e);
			/* Verify the candidate, since a may be a non-residue. */
			fp_sqr(t1, t0);
			r = (fp_cmp(t1, a) == CMP_EQ);
			fp_copy(c, t0);
		} else {
			int f = 0, m = 0;

			/* First, check if there is a root. Compute t1 = a^((p - 1)/2). */
			bn_rsh(e, e, 1);
			fp_exp(t0, a, e);

			if (fp_cmp_dig(t0, 1) != CMP_EQ) {
				/* Nope, there is no square root. */
				r = 0;
			} else {
				r = 1;
				/* Find a quadratic non-residue modulo p, that is a number t2
				 * such that (t2 | p) = t2^((p - 1)/2)!= 1. */
				do {
					fp_rand(t1);
					fp_exp(t0, t1, e);
				} while (fp_cmp_dig(t0, 1) == CMP_EQ);

				/* Write p - 1 as (e * 2^f), odd e. */
				bn_lsh(e, e, 1);
				while (bn_is_even(e)) {
					bn_rsh(e, e, 1);
					f++;
				}

				/* Compute t2 = t2^e. */
				fp_exp(t1, t1, e);

				/* Compute t1 = a^e, c = a^((e + 1)/2) = a^(e/2 + 1), odd e. */
				bn_rsh(e, e, 1);
				fp_exp(t0, a, e);
				/* NOTE(review): e->dp is reused as scratch fp storage for
				 * the root candidate here — relies on the bn_t digit buffer
				 * holding at least FP_DIGS digits; confirm allocation. */
				fp_mul(e->dp, t0, a);
				fp_sqr(t0, t0);
				fp_mul(t0, t0, a);
				fp_copy(c, e->dp);

				while (1) {
					if (fp_cmp_dig(t0, 1) == CMP_EQ) {
						break;
					}
					fp_copy(e->dp, t0);
					/* Find the smallest m with t0^(2^m) == 1. */
					for (m = 0; (m < f) && (fp_cmp_dig(t0, 1) != CMP_EQ); m++) {
						fp_sqr(t0, t0);
					}
					fp_copy(t0, e->dp);
					/* Raise t1 to 2^(f - m - 1). */
					for (int i = 0; i < f - m - 1; i++) {
						fp_sqr(t1, t1);
					}
					/* Update the candidate root and the cofactor. */
					fp_mul(c, c, t1);
					fp_sqr(t1, t1);
					fp_mul(t0, t0, t1);
					f = m;
				}
			}
		}
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		bn_free(e);
		fp_free(t0);
		fp_free(t1);
	}
	return r;
}
/**
 * Reduces a double-precision value using the special-form reduction
 * tailored to the current prime.
 */
void fp_rdc_quick(fp_t c, dv_t a) {
	const dig_t *modulus = fp_prime_get();

	fp_rdcs_low(c, a, modulus);
}
/**
 * Computes the constants required for evaluating Frobenius maps.
 */
static void fp2_calc() {
	bn_t e;
	fp2_t t0;
	fp2_t t1;
	ctx_t *ctx = core_get();

	bn_null(e);
	fp2_null(t0);
	fp2_null(t1);

	TRY {
		bn_new(e);
		fp2_new(t0);
		fp2_new(t1);

		/* t0 = nor^((p - 1)/6), where nor is the adjoined non-residue. */
		fp2_zero(t0);
		fp_set_dig(t0[0], 1);
		fp2_mul_nor(t0, t0);
		e->used = FP_DIGS;
		dv_copy(e->dp, fp_prime_get(), FP_DIGS);
		bn_sub_dig(e, e, 1);
		bn_div_dig(e, e, 6);
		fp2_exp(t0, t0, e);
#if ALLOC == AUTO
		/* Successive powers of t0: constants for the first Frobenius. */
		fp2_copy(ctx->fp2_p[0], t0);
		fp2_sqr(ctx->fp2_p[1], ctx->fp2_p[0]);
		fp2_mul(ctx->fp2_p[2], ctx->fp2_p[1], ctx->fp2_p[0]);
		fp2_sqr(ctx->fp2_p[3], ctx->fp2_p[1]);
		fp2_mul(ctx->fp2_p[4], ctx->fp2_p[3], ctx->fp2_p[0]);
#else
		/* Same powers, copied coordinate-wise for other allocators. */
		fp_copy(ctx->fp2_p[0][0], t0[0]);
		fp_copy(ctx->fp2_p[0][1], t0[1]);
		fp2_sqr(t1, t0);
		fp_copy(ctx->fp2_p[1][0], t1[0]);
		fp_copy(ctx->fp2_p[1][1], t1[1]);
		fp2_mul(t1, t1, t0);
		fp_copy(ctx->fp2_p[2][0], t1[0]);
		fp_copy(ctx->fp2_p[2][1], t1[1]);
		fp2_sqr(t1, t0);
		fp2_sqr(t1, t1);
		fp_copy(ctx->fp2_p[3][0], t1[0]);
		fp_copy(ctx->fp2_p[3][1], t1[1]);
		fp2_mul(t1, t1, t0);
		fp_copy(ctx->fp2_p[4][0], t1[0]);
		fp_copy(ctx->fp2_p[4][1], t1[1]);
#endif
		/* t0 * t0^p lies in the base field (only coordinate 0 is kept):
		 * constants for the second power of the Frobenius. */
		fp2_frb(t1, t0, 1);
		fp2_mul(t0, t1, t0);
		fp_copy(ctx->fp2_p2[0], t0[0]);
		fp_sqr(ctx->fp2_p2[1], ctx->fp2_p2[0]);
		fp_mul(ctx->fp2_p2[2], ctx->fp2_p2[1], ctx->fp2_p2[0]);
		fp_sqr(ctx->fp2_p2[3], ctx->fp2_p2[1]);
		/* Third-power constants as products of the previous two sets. */
		for (int i = 0; i < 5; i++) {
			fp_mul(ctx->fp2_p3[i][0], ctx->fp2_p2[i % 3], ctx->fp2_p[i][0]);
			fp_mul(ctx->fp2_p3[i][1], ctx->fp2_p2[i % 3], ctx->fp2_p[i][1]);
		}
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		bn_free(e);
		fp2_free(t0);
		fp2_free(t1);
	}
}
/**
 * Modular addition: c = (a + b) mod p, with one conditional subtraction.
 */
void fp_addm_low(dig_t *c, const dig_t *a, const dig_t *b) {
	const dig_t *p = fp_prime_get();

	fp_addn_low(c, a, b);
	if (fp_cmp(c, p) != CMP_LT) {
		fp_subn_low(c, c, p);
	}
}
/**
 * Computes the constants required for evaluating Frobenius maps.
 */
static void fp3_calc() {
	bn_t e;
	fp3_t t0, t1, t2;
	ctx_t *ctx = core_get();

	bn_null(e);
	fp3_null(t0);
	fp3_null(t1);
	fp3_null(t2);

	TRY {
		bn_new(e);
		fp3_new(t0);
		fp3_new(t1);
		fp3_new(t2);

		/* fp3_base[0] = (-cnr)^((p - 1)/3), fp3_base[1] = its square. */
		fp_set_dig(ctx->fp3_base[0], -fp_prime_get_cnr());
		fp_neg(ctx->fp3_base[0], ctx->fp3_base[0]);
		e->used = FP_DIGS;
		dv_copy(e->dp, fp_prime_get(), FP_DIGS);
		bn_sub_dig(e, e, 1);
		bn_div_dig(e, e, 3);
		fp_exp(ctx->fp3_base[0], ctx->fp3_base[0], e);
		fp_sqr(ctx->fp3_base[1], ctx->fp3_base[0]);

		fp3_zero(t0);
		fp_set_dig(t0[1], 1);
		dv_copy(e->dp, fp_prime_get(), FP_DIGS);
		bn_sub_dig(e, e, 1);
		bn_div_dig(e, e, 6);
		/* t0 = u^((p-1)/6). */
		fp3_exp(t0, t0, e);
		/* First-Frobenius constants: one coordinate of each power of t0. */
		fp_copy(ctx->fp3_p[0], t0[2]);
		fp3_sqr(t1, t0);
		fp_copy(ctx->fp3_p[1], t1[1]);
		fp3_mul(t2, t1, t0);
		fp_copy(ctx->fp3_p[2], t2[0]);
		fp3_sqr(t2, t1);
		fp_copy(ctx->fp3_p[3], t2[2]);
		fp3_mul(t2, t2, t0);
		fp_copy(ctx->fp3_p[4], t2[1]);

		/* Second-power constants. The loops multiply by the cubic
		 * non-residue via |cnr| repeated subtractions of the negated
		 * product. */
		fp_mul(ctx->fp3_p2[0], ctx->fp3_p[0], ctx->fp3_base[1]);
		fp_mul(t0[0], ctx->fp3_p2[0], ctx->fp3_p[0]);
		fp_neg(ctx->fp3_p2[0], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p2[0], ctx->fp3_p2[0], t0[0]);
		}
		fp_mul(ctx->fp3_p2[1], ctx->fp3_p[1], ctx->fp3_base[0]);
		fp_mul(ctx->fp3_p2[1], ctx->fp3_p2[1], ctx->fp3_p[1]);
		fp_sqr(ctx->fp3_p2[2], ctx->fp3_p[2]);
		fp_mul(ctx->fp3_p2[3], ctx->fp3_p[3], ctx->fp3_base[1]);
		fp_mul(t0[0], ctx->fp3_p2[3], ctx->fp3_p[3]);
		fp_neg(ctx->fp3_p2[3], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p2[3], ctx->fp3_p2[3], t0[0]);
		}
		fp_mul(ctx->fp3_p2[4], ctx->fp3_p[4], ctx->fp3_base[0]);
		fp_mul(ctx->fp3_p2[4], ctx->fp3_p2[4], ctx->fp3_p[4]);

		/* Third-power constants, same pattern. */
		fp_mul(ctx->fp3_p3[0], ctx->fp3_p[0], ctx->fp3_base[0]);
		fp_mul(t0[0], ctx->fp3_p3[0], ctx->fp3_p2[0]);
		fp_neg(ctx->fp3_p3[0], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p3[0], ctx->fp3_p3[0], t0[0]);
		}
		fp_mul(ctx->fp3_p3[1], ctx->fp3_p[1], ctx->fp3_base[1]);
		fp_mul(t0[0], ctx->fp3_p3[1], ctx->fp3_p2[1]);
		fp_neg(ctx->fp3_p3[1], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p3[1], ctx->fp3_p3[1], t0[0]);
		}
		fp_mul(ctx->fp3_p3[2], ctx->fp3_p[2], ctx->fp3_p2[2]);
		fp_mul(ctx->fp3_p3[3], ctx->fp3_p[3], ctx->fp3_base[0]);
		fp_mul(t0[0], ctx->fp3_p3[3], ctx->fp3_p2[3]);
		fp_neg(ctx->fp3_p3[3], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p3[3], ctx->fp3_p3[3], t0[0]);
		}
		fp_mul(ctx->fp3_p3[4], ctx->fp3_p[4], ctx->fp3_base[1]);
		fp_mul(t0[0], ctx->fp3_p3[4], ctx->fp3_p2[4]);
		fp_neg(ctx->fp3_p3[4], t0[0]);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_sub(ctx->fp3_p3[4], ctx->fp3_p3[4], t0[0]);
		}
		/* Fourth and fifth powers as products of lower powers. */
		for (int i = 0; i < 5; i++) {
			fp_mul(ctx->fp3_p4[i], ctx->fp3_p[i], ctx->fp3_p3[i]);
			fp_mul(ctx->fp3_p5[i], ctx->fp3_p2[i], ctx->fp3_p3[i]);
		}
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		bn_free(e);
		fp3_free(t0);
		fp3_free(t1);
		fp3_free(t2);
	}
}
/**
 * Negates a field element: c = p - a.
 * NOTE(review): for a == 0 this yields p rather than 0 — confirm the
 * caller contract excludes zero or reduces afterwards.
 */
void fp_negm_low(dig_t *c, const dig_t *a) {
	const dig_t *p = fp_prime_get();

	fp_subn_low(c, p, a);
}
/**
 * Multiplies a double-precision quadratic extension element by the
 * non-residue; the formula is selected by the prime size or p mod 8.
 */
void fp2_nord_low(dv2_t c, dv2_t a) {
	dv2_t t;
	bn_t b;

	dv2_null(t);
	bn_null(b);

	TRY {
		dv2_new(t);
		bn_new(b);
#if FP_PRIME == 158
		/* Computes (4a_0 - a_1) + (a_0 + 4a_1) * u, i.e. multiplication
		 * by (4 + u) when u^2 = -1 — NOTE(review): confirm the
		 * non-residue for this parameter set. */
		fp_addc_low(t[0], a[0], a[0]);
		fp_addc_low(t[0], t[0], t[0]);
		fp_subc_low(t[0], t[0], a[1]);
		fp_addc_low(t[1], a[1], a[1]);
		fp_addc_low(t[1], t[1], t[1]);
		fp_addc_low(c[1], a[0], t[1]);
		dv_copy(c[0], t[0], 2 * FP_DIGS);
#elif defined(FP_QNRES)
		/* If p = 3 mod 8, (1 + i) is a QNR/CNR. */
		/* (a_0 + a_1 * i) * (1 + i) = (a_0 - a_1) + (a_0 + a_1) * u. */
		/* Copy a_1 first so the result is correct when c aliases a. */
		dv_copy(t[0], a[1], 2 * FP_DIGS);
		fp_addc_low(c[1], a[0], a[1]);
		fp_subc_low(c[0], a[0], t[0]);
#else
		switch (fp_prime_get_mod8()) {
			case 3:
				/* If p = 3 mod 8, (1 + u) is a QNR, u^2 = -1. */
				/* (a_0 + a_1 * u) * (1 + u) = (a_0 - a_1) + (a_0 + a_1) * u. */
				dv_copy(t[0], a[1], 2 * FP_DIGS);
				fp_addc_low(c[1], a[0], a[1]);
				fp_subc_low(c[0], a[0], t[0]);
				break;
			case 5:
				/* If p = 5 mod 8, (u) is a QNR. */
				/* Shift p into the upper half so the subtraction below
				 * cannot underflow. */
				dv_copy(t[0], a[0], 2 * FP_DIGS);
				dv_zero(t[1], FP_DIGS);
				dv_copy(t[1] + FP_DIGS, fp_prime_get(), FP_DIGS);
				fp_subc_low(c[0], t[1], a[1]);
				/* Extra subtractions handle |qnr| > 1. */
				for (int i = -1; i > fp_prime_get_qnr(); i--) {
					fp_subc_low(c[0], c[0], a[1]);
				}
				dv_copy(c[1], t[0], 2 * FP_DIGS);
				break;
			case 7:
				/* If p = 7 mod 8, (2^lg_4(b-1) + u) is a QNR/CNR. */
				/* (a_0 + a_1 * u)(2^lg_4(b-1) + u) =
				 * (2^lg_4(b-1)a_0 - a_1) + (a_0 + 2^lg_4(b-1)a_1 * u. */
				fp2_addc_low(t, a, a);
				/* Double once per remaining bit of the exponent derived
				 * from the curve coefficient b. */
				fp_prime_back(b, ep_curve_get_b());
				for (int i = 1; i < bn_bits(b) / 2; i++) {
					fp2_addc_low(t, t, t);
				}
				fp_subc_low(c[0], t[0], a[1]);
				fp_addc_low(c[1], t[1], a[0]);
				break;
			default:
				THROW(ERR_NO_VALID);
				break;
		}
#endif
	} CATCH_ANY {
		THROW(ERR_CAUGHT);
	} FINALLY {
		dv2_free(t);
		bn_free(b);
	}
}