/**
 * Reduces a double-precision value modulo the prime using basic (digit
 * serial) Montgomery reduction.
 *
 * @param[out] c - the single-precision result, in [0, p).
 * @param[in]  a - the double-precision value to reduce (clobbered: the
 *                 low digits are zeroed in place as reduction proceeds).
 */
void fp_rdc_monty_basic(fp_t c, dv_t a) {
	int i;
	dig_t r, c0, c1, *tmp, u0;
	const dig_t *p = NULL;

	tmp = a;
	/* Precomputed Montgomery constant — presumably -p^{-1} mod 2^w;
	 * confirm against fp_prime_get_rdc()'s contract. */
	u0 = *(fp_prime_get_rdc());
	p = fp_prime_get();

	c1 = 0;
	for (i = 0; i < FP_DIGS; i++, tmp++) {
		/* Quotient digit that zeroes the current least-significant digit. */
		r = (dig_t)(*tmp * u0);
		/* Use the cached prime pointer instead of re-calling
		 * fp_prime_get() on every iteration (loop-invariant). */
		c0 = fp_mula_low(tmp, p, r);
		/* We must use this because the size (FP_DIGS - i) is variable. */
		c1 += bn_add1_low(tmp + FP_DIGS, tmp + FP_DIGS, c0, FP_DIGS - i);
	}
	/* The reduced value now lives in the upper half of a. */
	fp_copy(c, a + FP_DIGS);

	/* Undo any carries that accumulated out of the top half. */
	for (i = 0; i < c1; i++) {
		fp_subn_low(c, c, p);
	}
	/* Final correction into [0, p). */
	if (fp_cmpn_low(c, p) != CMP_LT) {
		fp_subn_low(c, c, p);
	}
}
/**
 * Adds two double-precision values and reduces the upper half modulo the
 * prime, so the high FP_DIGS digits of c stay below p.
 *
 * @param[out] c - the double-precision result.
 * @param[in]  a - the first double-precision operand.
 * @param[in]  b - the second double-precision operand.
 */
void fp_addc_low(dig_t *c, const dig_t *a, const dig_t *b) {
	dig_t *high = c + FP_DIGS;

	/* A nonzero return from the full-width add means the sum overflowed
	 * the top digit; either way the high half may still reach p. */
	if (fp_addd_low(c, a, b) || fp_cmpn_low(high, fp_prime_get()) != CMP_LT) {
		fp_subn_low(high, high, fp_prime_get());
	}
}
/**
 * Doubles a prime-field element (c = a + a) with manual carry
 * propagation, then applies one conditional subtraction of the prime.
 *
 * @param[out] c - the result.
 * @param[in]  a - the element to double.
 */
void fp_dblm_low(dig_t *c, const dig_t *a) {
	int i;
	dig_t cin = 0;

	for (i = 0; i < FP_DIGS; i++) {
		dig_t d = a[i];
		dig_t sum = d + d;
		/* Carry out of the doubling itself. */
		dig_t cout = (sum < d);
		dig_t res = sum + cin;
		/* Carry out of adding the incoming carry. */
		cout |= (res < sum);
		cin = cout;
		c[i] = res;
	}
	/* One subtraction suffices since 2a < 2p. */
	if (cin || fp_cmpn_low(c, fp_prime_get()) != CMP_LT) {
		fp_subn_low(c, c, fp_prime_get());
	}
}
/**
 * Reduces a double-precision value modulo the prime using Montgomery
 * reduction in Comba (product-scanning) form.
 *
 * The accumulator (r2:r1:r0) holds one output column at a time; COMBA_STEP
 * adds a digit product into it and COMBA_ADD adds a single digit.
 *
 * @param[out] c - the single-precision result.
 * @param[in]  a - the double-precision value to reduce (pointer is
 *                 advanced internally; the buffer itself is read-only).
 */
void fp_rdcn_low2(dig_t *c, dig_t *a) {
	int i, j;
	dig_t r0, r1, r2, u;
	dig_t *m, *tmp, *tmpm, *tmpc;

	/* Precomputed Montgomery constant — presumably -p^{-1} mod 2^w;
	 * confirm against fp_prime_get_rdc()'s contract. */
	u = *(fp_prime_get_rdc());
	m = fp_prime_get();
	tmpc = c;

	r0 = r1 = r2 = 0;
	/* First pass (columns 0 .. FP_DIGS-1): compute the quotient digits,
	 * storing them in c as they are produced. */
	for (i = 0; i < FP_DIGS; i++, tmpc++, a++) {
		tmp = c;
		tmpm = m + i;
		/* Accumulate products of earlier quotient digits with the prime. */
		for (j = 0; j < i; j++, tmp++, tmpm--) {
			COMBA_STEP(r2, r1, r0, *tmp, *tmpm);
		}
		COMBA_ADD(r2, r1, r0, *a);
		/* Quotient digit chosen so this column becomes zero mod 2^w. */
		*tmpc = (dig_t)(r0 * u);
		COMBA_STEP(r2, r1, r0, *tmpc, *m);
		/* Shift the accumulator down one digit for the next column. */
		r0 = r1;
		r1 = r2;
		r2 = 0;
	}
	/* Second pass (columns FP_DIGS .. 2*FP_DIGS-2): finish the products
	 * and overwrite c with the reduced result, low digit first. */
	for (i = FP_DIGS; i < 2 * FP_DIGS - 1; i++, a++) {
		tmp = c + (i - FP_DIGS + 1);
		tmpm = m + FP_DIGS - 1;
		for (j = i - FP_DIGS + 1; j < FP_DIGS; j++, tmp++, tmpm--) {
			COMBA_STEP(r2, r1, r0, *tmp, *tmpm);
		}
		COMBA_ADD(r2, r1, r0, *a);
		c[i - FP_DIGS] = r0;
		r0 = r1;
		r1 = r2;
		r2 = 0;
	}
	/* Topmost column yields the most-significant result digit. */
	COMBA_ADD(r2, r1, r0, *a);
	c[FP_DIGS - 1] = r0;
	/* Single conditional subtraction — assumes the pre-correction result
	 * is below 2p (standard Montgomery bound); r1 is the overflow digit. */
	if (r1 || fp_cmpn_low(c, m) != CMP_LT) {
		fp_subn_low(c, c, m);
	}
}
/**
 * Reduces a double-precision value modulo a prime of special (sparse)
 * form, iterating q/r folding until the quotient vanishes.
 *
 * The prime's sparse form comes from fp_prime_get_sps() as a list of
 * signed bit positions; b0/d0 split FP_BITS into a digit count and a
 * residual bit count so shifts by b^k can be done digit- then bit-wise.
 *
 * @param[out] c - the single-precision result, in [0, m).
 * @param[in]  a - the double-precision value to reduce.
 * @param[in]  m - the prime modulus.
 */
void fp_rdcs_low(dig_t *c, dig_t *a, dig_t *m) {
	align dig_t q[2 * FP_DIGS], _q[2 * FP_DIGS];
	align dig_t _r[2 * FP_DIGS], r[2 * FP_DIGS], t[2 * FP_DIGS];
	int *sform, len;
	int first, i, j, b0, d0, b1, d1;
	dig_t carry;

	/* Signed bit positions describing the sparse prime. */
	sform = fp_prime_get_sps(&len);

	SPLIT(b0, d0, FP_BITS, FP_DIG_LOG);
	/* Number of digits holding the low FP_BITS bits. */
	first = (d0) + (b0 == 0 ? 0 : 1);

	/* q = floor(a/b^k) */
	dv_zero(q, 2 * FP_DIGS);
	bn_rshd_low(q, a, 2 * FP_DIGS, d0);
	if (b0 > 0) {
		bn_rshb_low(q, q, 2 * FP_DIGS, b0);
	}
	/* r = a - qb^k. */
	dv_copy(r, a, first);
	if (b0 > 0) {
		r[first - 1] &= MASK(b0);
	}

	/* NOTE(review): carry is initialized here but never read below —
	 * looks like a leftover; confirm before removing. */
	carry = 0;
	while (!fp_is_zero(q)) {
		/* _q accumulates q * (b^k - m), term by term of the sparse form. */
		dv_zero(_q, 2 * FP_DIGS);
		for (i = len - 1; i > 0; i--) {
			j = (sform[i] < 0 ? -sform[i] : sform[i]);
			SPLIT(b1, d1, j, FP_DIG_LOG);
			dv_zero(t, 2 * FP_DIGS);
			/* NOTE(review): only FP_DIGS digits of q are shifted here,
			 * although t is 2*FP_DIGS wide — presumably q already fits in
			 * FP_DIGS digits at this point; confirm. */
			bn_lshd_low(t, q, FP_DIGS, d1);
			if (b1 > 0) {
				bn_lshb_low(t, t, 2 * FP_DIGS, b1);
			}
			/* NOTE(review): positive sparse-form terms are subtracted and
			 * negative ones added — presumably because sform encodes
			 * m = b^k - f(b); confirm against fp_prime_get_sps(). */
			if (sform[i] > 0) {
				bn_subn_low(_q, _q, t, 2 * FP_DIGS);
			} else {
				bn_addn_low(_q, _q, t, 2 * FP_DIGS);
			}
		}
		/* Constant term of the sparse form, same sign convention. */
		if (sform[0] > 0) {
			bn_subn_low(_q, _q, q, 2 * FP_DIGS);
		} else {
			bn_addn_low(_q, _q, q, 2 * FP_DIGS);
		}
		/* Split _q again: new quotient (high part) ... */
		bn_rshd_low(q, _q, 2 * FP_DIGS, d0);
		if (b0 > 0) {
			bn_rshb_low(q, q, 2 * FP_DIGS, b0);
		}
		/* ... and new partial remainder (low FP_BITS bits). */
		dv_copy(_r, _q, first);
		if (b0 > 0) {
			_r[first - 1] &= MASK(b0);
		}
		fp_add(r, r, _r);
	}
	/* Final correction into [0, m). */
	while (fp_cmpn_low(r, m) != CMP_LT) {
		fp_subn_low(r, r, m);
	}
	fp_copy(c, r);
}
/**
 * Compares two prime-field elements; thin wrapper over the low-level
 * digit-vector comparison.
 *
 * @param[in] a - the first element.
 * @param[in] b - the second element.
 * @return the comparison result from fp_cmpn_low.
 */
int fp_cmp(const fp_t a, const fp_t b) {
	int result;

	result = fp_cmpn_low(a, b);
	return result;
}