Example #1
void fp2_nord_low(dv2_t c, dv2_t a) {
	dv2_t t;
	bn_t b;

	dv2_null(t);
	bn_null(b);

	TRY {
		dv2_new(t);
		bn_new(b);

#ifdef FP_QNRES
		/* If p = 3 mod 8, (1 + u) is a QNR/CNR, since u^2 = -1. */
		/* (a_0 + a_1 * u) * (1 + u) = (a_0 - a_1) + (a_0 + a_1) * u. */
		dv_copy(t[0], a[1], 2 * FP_DIGS);
		fp_addc_low(c[1], a[0], a[1]);
		fp_subc_low(c[0], a[0], t[0]);
#else
		switch (fp_prime_get_mod8()) {
			case 3:
				/* If p = 3 mod 8, (1 + u) is a QNR, u^2 = -1. */
				/* (a_0 + a_1 * u) * (1 + u) = (a_0 - a_1) + (a_0 + a_1) * u. */
				dv_copy(t[0], a[1], 2 * FP_DIGS);
				fp_addc_low(c[1], a[0], a[1]);
				fp_subc_low(c[0], a[0], t[0]);
				break;
			case 1:
			case 5:
				/* If p = 1 or 5 mod 8, u is a QNR. */
				dv_copy(t[0], a[0], 2 * FP_DIGS);
				dv_zero(t[1], FP_DIGS);
				dv_copy(t[1] + FP_DIGS, fp_prime_get(), FP_DIGS);
				fp_subc_low(c[0], t[1], a[1]);
				for (int i = -1; i > fp_prime_get_qnr(); i--) {
					fp_subc_low(c[0], c[0], a[1]);
				}
				dv_copy(c[1], t[0], 2 * FP_DIGS);
				break;
			case 7:
				/* If p = 7 mod 8, (2 + u) is a QNR/CNR.   */
				fp2_addc_low(t, a, a);
				fp_subc_low(c[0], t[0], a[1]);
				fp_addc_low(c[1], t[1], a[0]);
				break;
			default:
				THROW(ERR_NO_VALID);
				break;
		}
#endif
	}
	CATCH_ANY {
		THROW(ERR_CAUGHT);
	}
	FINALLY {
		dv2_free(t);
		bn_free(b);
	}
}
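
The p = 3 mod 8 shortcut above relies on the identity (a_0 + a_1 * u) * (1 + u) = (a_0 - a_1) + (a_0 + a_1) * u when u^2 = -1. Below is a minimal, self-contained sketch (not RELIC code) that checks this identity against a schoolbook product, using a small illustrative prime and plain machine words instead of the library's digit vectors.

/* Toy check: multiplying by the non-residue (1 + u), with u^2 = -1,
 * reduces to one addition and one subtraction per component. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P 19u /* illustrative prime, 19 = 3 (mod 8) */

int main(void) {
	uint32_t qnr = P - 1; /* u^2 = -1 (mod p) */
	for (uint32_t a0 = 0; a0 < P; a0++) {
		for (uint32_t a1 = 0; a1 < P; a1++) {
			/* Schoolbook: (a0 + a1*u) * (1 + 1*u). */
			uint32_t c0 = (a0 + qnr * a1) % P;
			uint32_t c1 = (a0 + a1) % P;
			/* Shortcut from the case above: (a0 - a1) + (a0 + a1)*u. */
			uint32_t d0 = (a0 + P - a1) % P;
			uint32_t d1 = (a0 + a1) % P;
			assert(c0 == d0 && c1 == d1);
		}
	}
	puts("fp2 non-residue shortcut: ok");
	return 0;
}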
Example #2
void fp3_sqrn_low(dv3_t c, fp3_t a) {
	align dig_t t0[2 * FP_DIGS], t1[2 * FP_DIGS], t2[2 * FP_DIGS];
	align dig_t t3[2 * FP_DIGS], t4[2 * FP_DIGS], t5[2 * FP_DIGS];

	/* t0 = a_0^2. */
	fp_sqrn_low(t0, a[0]);

	/* t1 = 2 * a_1 * a_2. */
#ifdef FP_SPACE
	fp_dbln_low(t2, a[1]);
#else
	fp_dblm_low(t2, a[1]);
#endif

	fp_muln_low(t1, t2, a[2]);

	/* t2 = a_2^2. */
	fp_sqrn_low(t2, a[2]);

	/* t3 = (a_0 + a_2 + a_1)^2, t4 = (a_0 + a_2 - a_1)^2. */
#ifdef FP_SPACE
	fp_addn_low(t3, a[0], a[2]);
	fp_addn_low(t4, t3, a[1]);
#else
	fp_addm_low(t3, a[0], a[2]);
	fp_addm_low(t4, t3, a[1]);
#endif
	fp_subm_low(t5, t3, a[1]);
	fp_sqrn_low(t3, t4);
	fp_sqrn_low(t4, t5);

	/* t4 = (t4 + t3)/2. */
	fp_addd_low(t4, t4, t3);
	fp_hlvd_low(t4, t4);

	/* t3 = t3 - t4 - t1. */
	fp_addc_low(t5, t1, t4);
	fp_subc_low(t3, t3, t5);

	/* c_2 = t4 - t0 - t2. */
	fp_addc_low(t5, t0, t2);
	fp_subc_low(c[2], t4, t5);

	/* c_0 = t0 + t1 * B. */
	fp_subc_low(c[0], t0, t1);
	for (int i = -1; i > fp_prime_get_cnr(); i--) {
		fp_subc_low(c[0], c[0], t1);
	}

	/* c_1 = t3 + t2 * B. */
	fp_subc_low(c[1], t3, t2);
	for (int i = -1; i > fp_prime_get_cnr(); i--) {
		fp_subc_low(c[1], c[1], t2);
	}
}
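
The trick above is that the two squarings (a_0 + a_2 + a_1)^2 and (a_0 + a_2 - a_1)^2, combined with one halving, recover the cross products 2*a_0*a_1 and 2*a_0*a_2 without extra multiplications. The following minimal sketch (not RELIC code) replays the same steps over a small illustrative prime and an illustrative cubic non-residue B, and compares the result with a schoolbook squaring in F_p[w]/(w^3 - B).

/* Toy check of the squaring formulas used by fp3_sqrn_low. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P 23u /* illustrative prime */
#define B 2u  /* illustrative cubic non-residue: w^3 = B */

int main(void) {
	uint32_t inv2 = (P + 1) / 2; /* 1/2 (mod p) */
	for (uint32_t a0 = 0; a0 < P; a0++)
	for (uint32_t a1 = 0; a1 < P; a1++)
	for (uint32_t a2 = 0; a2 < P; a2++) {
		/* Schoolbook square of a0 + a1*w + a2*w^2. */
		uint32_t s0 = (a0 * a0 + B * (2 * a1 * a2)) % P;
		uint32_t s1 = (2 * a0 * a1 + B * (a2 * a2)) % P;
		uint32_t s2 = (a1 * a1 + 2 * a0 * a2) % P;
		/* Same sequence of steps, without the lazy reduction. */
		uint32_t t0 = a0 * a0 % P;
		uint32_t t1 = 2 * a1 * a2 % P;
		uint32_t t2 = a2 * a2 % P;
		uint32_t t3 = (a0 + a2 + a1) * (a0 + a2 + a1) % P;
		uint32_t t4 = (a0 + a2 + P - a1) * (a0 + a2 + P - a1) % P;
		t4 = (t4 + t3) * inv2 % P;          /* (a0 + a2)^2 + a1^2 */
		t3 = (t3 + 2 * P - t4 - t1) % P;    /* 2*a0*a1 */
		uint32_t c2 = (t4 + 2 * P - t0 - t2) % P;
		uint32_t c0 = (t0 + B * t1) % P;
		uint32_t c1 = (t3 + B * t2) % P;
		assert(c0 == s0 && c1 == s1 && c2 == s2);
	}
	puts("fp3 squaring trick: ok");
	return 0;
}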
Example #3
void fp2_mul_basic(fp2_t c, fp2_t a, fp2_t b) {
	dv_t t0, t1, t2, t3, t4;

	dv_null(t0);
	dv_null(t1);
	dv_null(t2);
	dv_null(t3);
	dv_null(t4);

	TRY {
		dv_new(t0);
		dv_new(t1);
		dv_new(t2);
		dv_new(t3);
		dv_new(t4);

		/* Karatsuba algorithm. */

		/* t2 = a_0 + a_1, t1 = b_0 + b_1. */
		fp_add(t2, a[0], a[1]);
		fp_add(t1, b[0], b[1]);

		/* t3 = (a_0 + a_1) * (b_0 + b_1). */
		fp_muln_low(t3, t2, t1);

		/* t0 = a_0 * b_0, t4 = a_1 * b_1. */
		fp_muln_low(t0, a[0], b[0]);
		fp_muln_low(t4, a[1], b[1]);

		/* t2 = (a_0 * b_0) + (a_1 * b_1). */
		fp_addc_low(t2, t0, t4);

		/* t1 = (a_0 * b_0) + u^2 * (a_1 * b_1). */
		fp_subc_low(t1, t0, t4);

		/* If u^2 < -1, subtract (a_1 * b_1) the remaining |u^2| - 1 times. */
		for (int i = -1; i > fp_prime_get_qnr(); i--) {
			fp_subc_low(t1, t1, t4);
		}
		/* c_0 = t1 mod p. */
		fp_rdc(c[0], t1);

		/* t4 = t3 - t2. */
		fp_subc_low(t4, t3, t2);

		/* c_1 = t4 mod p. */
		fp_rdc(c[1], t4);
	}
	CATCH_ANY {
		THROW(ERR_CAUGHT);
	}
	FINALLY {
		dv_free(t0);
		dv_free(t1);
		dv_free(t2);
		dv_free(t3);
		dv_free(t4);
	}
}
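
Karatsuba trades the four base-field products of the schoolbook method for three, a_0*b_0, a_1*b_1 and (a_0 + a_1)*(b_0 + b_1), and recovers the cross coefficient by subtraction. A minimal sketch (not RELIC code) of the same arrangement over a small illustrative prime with u^2 = -1:

/* Toy check: Karatsuba multiplication in F_p[u]/(u^2 - qnr). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P 19u        /* illustrative prime */
#define QNR (P - 1u) /* illustrative non-residue: u^2 = -1 */

int main(void) {
	for (uint32_t a0 = 0; a0 < P; a0++)
	for (uint32_t a1 = 0; a1 < P; a1++)
	for (uint32_t b0 = 0; b0 < P; b0++)
	for (uint32_t b1 = 0; b1 < P; b1++) {
		/* Schoolbook: four products. */
		uint32_t s0 = (a0 * b0 + QNR * (a1 * b1 % P)) % P;
		uint32_t s1 = (a0 * b1 + a1 * b0) % P;
		/* Karatsuba: three products, as in fp2_mul_basic. */
		uint32_t t0 = a0 * b0 % P;
		uint32_t t4 = a1 * b1 % P;
		uint32_t t3 = (a0 + a1) * (b0 + b1) % P;
		uint32_t c0 = (t0 + QNR * t4) % P;
		uint32_t c1 = (t3 + 2 * P - t0 - t4) % P;
		assert(c0 == s0 && c1 == s1);
	}
	puts("fp2 Karatsuba: ok");
	return 0;
}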
Example #4
void fp2_muln_low(dv2_t c, fp2_t a, fp2_t b) {
	align dig_t t0[2 * FP_DIGS], t1[2 * FP_DIGS], t2[2 * FP_DIGS];

	/* Karatsuba algorithm. */

	/* t0 = a_0 + a_1, t1 = b_0 + b_1. */
#ifdef FP_SPACE
	fp_addn_low(t0, a[0], a[1]);
	fp_addn_low(t1, b[0], b[1]);
#else
	fp_addm_low(t0, a[0], a[1]);
	fp_addm_low(t1, b[0], b[1]);
#endif
	/* c_0 = a_0 * b_0, c_1 = a_1 * b_1. */
	fp_muln_low(c[0], a[0], b[0]);
	fp_muln_low(c[1], a[1], b[1]);
	/* t2 = (a_0 + a_1) * (b_0 + b_1). */
	fp_muln_low(t2, t0, t1);

	/* t0 = (a_0 * b_0) + (a_1 * b_1). */
#ifdef FP_SPACE
	fp_addd_low(t0, c[0], c[1]);
#else
	fp_addc_low(t0, c[0], c[1]);
#endif

	/* c_0 = (a_0 * b_0) + u^2 * (a_1 * b_1). */
	fp_subc_low(c[0], c[0], c[1]);

#ifndef FP_QNRES
	/* Otherwise, adjust so that c_0 = (a_0 * b_0) + u^2 * (a_1 * b_1). */
	for (int i = -1; i > fp_prime_get_qnr(); i--) {
		fp_subc_low(c[0], c[0], c[1]);
	}
	for (int i = 0; i <= fp_prime_get_qnr(); i++) {
		fp_addc_low(c[0], c[0], c[1]);
	}	
#endif

	/* c_1 = t2 - t0. */
#ifdef FP_SPACE
	fp_subd_low(c[1], t2, t0);
#else
	fp_subc_low(c[1], t2, t0);
#endif
}
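
Compared with fp2_mul_basic, the _low variant keeps intermediate values in double precision and leaves the final reduction to the caller, so the cost of a modular reduction is paid once per output component rather than once per operation; with FP_SPACE there is also headroom to skip carry propagation. A rough sketch of the lazy-reduction idea (not RELIC code), using 64-bit accumulators over an illustrative 31-bit prime:

/* Toy sketch: reduce once at the end instead of after every operation. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P 2147483647u /* 2^31 - 1, illustrative prime */

int main(void) {
	uint64_t a0 = 123456789, a1 = 987654321;
	uint64_t b0 = 192837465, b1 = 918273645;

	/* Eager: reduce after every operation. */
	uint64_t e0 = ((a0 * b0) % P + P - (a1 * b1) % P) % P;

	/* Lazy: accumulate double-width products and reduce once,
	 * mimicking fp_muln_low/fp_subc_low followed by fp_rdc.
	 * 2*P*P keeps the unsigned subtraction non-negative. */
	uint64_t d0 = a0 * b0 + 2 * (uint64_t)P * P - a1 * b1;
	assert(d0 % P == e0);

	puts("lazy reduction: ok");
	return 0;
}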
Example #5
void fp3_muln_low(dv3_t c, fp3_t a, fp3_t b) {
	align dig_t t0[2 * FP_DIGS], t1[2 * FP_DIGS], t2[2 * FP_DIGS], t3[2 * FP_DIGS];
	align dig_t t4[2 * FP_DIGS], t5[2 * FP_DIGS], t6[2 * FP_DIGS];

	/* Karatsuba algorithm. */

	/* t0 = a_0 * b_0, t1 = a_1 * b_1, t2 = a_2 * b_2. */
	fp_muln_low(t0, a[0], b[0]);
	fp_muln_low(t1, a[1], b[1]);
	fp_muln_low(t2, a[2], b[2]);

	/* t3 = (a_1 + a_2) * (b_1 + b_2). */
#ifdef FP_SPACE
	fp_addn_low(t3, a[1], a[2]);
	fp_addn_low(t4, b[1], b[2]);
#else
	fp_addm_low(t3, a[1], a[2]);
	fp_addm_low(t4, b[1], b[2]);
#endif
	fp_muln_low(t5, t3, t4);
	fp_addd_low(t6, t1, t2);
	fp_subc_low(t4, t5, t6);
	fp_subc_low(c[0], t0, t4);
	for (int i = -1; i > fp_prime_get_cnr(); i--) {
		fp_subc_low(c[0], c[0], t4);
	}

#ifdef FP_SPACE
	fp_addn_low(t4, a[0], a[1]);
	fp_addn_low(t5, b[0], b[1]);
#else
	fp_addm_low(t4, a[0], a[1]);
	fp_addm_low(t5, b[0], b[1]);
#endif
	fp_muln_low(t6, t4, t5);
	fp_addd_low(t4, t0, t1);
	fp_subc_low(t4, t6, t4);
	fp_subc_low(c[1], t4, t2);
	for (int i = -1; i > fp_prime_get_cnr(); i--) {
		fp_subc_low(c[1], c[1], t2);
	}

#ifdef FP_SPACE
	fp_addn_low(t5, a[0], a[2]);
	fp_addn_low(t6, b[0], b[2]);
#else
	fp_addm_low(t5, a[0], a[2]);
	fp_addm_low(t6, b[0], b[2]);
#endif
	fp_muln_low(t4, t5, t6);
	fp_addd_low(t6, t0, t2);
	fp_subc_low(t5, t4, t6);
	fp_addc_low(c[2], t5, t1);
}
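
In the cubic extension the same Karatsuba idea brings the nine schoolbook products down to six: the three diagonal products a_i*b_i plus (a_1 + a_2)*(b_1 + b_2), (a_0 + a_1)*(b_0 + b_1) and (a_0 + a_2)*(b_0 + b_2). A minimal sketch (not RELIC code) over a small illustrative prime and an illustrative non-residue B:

/* Toy check: Karatsuba multiplication in F_p[w]/(w^3 - B). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P 7u /* illustrative prime */
#define B 3u /* illustrative cubic non-residue: w^3 = B */

int main(void) {
	for (uint32_t a0 = 0; a0 < P; a0++)
	for (uint32_t a1 = 0; a1 < P; a1++)
	for (uint32_t a2 = 0; a2 < P; a2++)
	for (uint32_t b0 = 0; b0 < P; b0++)
	for (uint32_t b1 = 0; b1 < P; b1++)
	for (uint32_t b2 = 0; b2 < P; b2++) {
		/* Schoolbook: nine products. */
		uint32_t s0 = (a0 * b0 + B * (a1 * b2 + a2 * b1)) % P;
		uint32_t s1 = (a0 * b1 + a1 * b0 + B * (a2 * b2)) % P;
		uint32_t s2 = (a0 * b2 + a2 * b0 + a1 * b1) % P;
		/* Karatsuba: six products, as in fp3_muln_low. */
		uint32_t t0 = a0 * b0 % P, t1 = a1 * b1 % P, t2 = a2 * b2 % P;
		uint32_t m12 = ((a1 + a2) * (b1 + b2) + 2 * P * P - t1 - t2) % P;
		uint32_t m01 = ((a0 + a1) * (b0 + b1) + 2 * P * P - t0 - t1) % P;
		uint32_t m02 = ((a0 + a2) * (b0 + b2) + 2 * P * P - t0 - t2) % P;
		uint32_t c0 = (t0 + B * m12) % P;
		uint32_t c1 = (m01 + B * t2) % P;
		uint32_t c2 = (m02 + t1) % P;
		assert(c0 == s0 && c1 == s1 && c2 == s2);
	}
	puts("fp3 Karatsuba: ok");
	return 0;
}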
Example #6
void fp2_addc_low(dv2_t c, dv2_t a, dv2_t b) {
	fp_addc_low(c[0], a[0], b[0]);
	fp_addc_low(c[1], a[1], b[1]);
}
Example #7
void fp2_nord_low(dv2_t c, dv2_t a) {
	dv2_t t;
	bn_t b;

	dv2_null(t);
	bn_null(b);

	TRY {
		dv2_new(t);
		bn_new(b);

#if FP_PRIME == 158
		fp_addc_low(t[0], a[0], a[0]);
		fp_addc_low(t[0], t[0], t[0]);
		fp_subc_low(t[0], t[0], a[1]);
		fp_addc_low(t[1], a[1], a[1]);
		fp_addc_low(t[1], t[1], t[1]);
		fp_addc_low(c[1], a[0], t[1]);
		dv_copy(c[0], t[0], 2 * FP_DIGS);
#elif defined(FP_QNRES)
		/* If p = 3 mod 8, (1 + u) is a QNR/CNR, since u^2 = -1. */
		/* (a_0 + a_1 * u) * (1 + u) = (a_0 - a_1) + (a_0 + a_1) * u. */
		dv_copy(t[0], a[1], 2 * FP_DIGS);
		fp_addc_low(c[1], a[0], a[1]);
		fp_subc_low(c[0], a[0], t[0]);
#else
		switch (fp_prime_get_mod8()) {
			case 3:
				/* If p = 3 mod 8, (1 + u) is a QNR, u^2 = -1. */
				/* (a_0 + a_1 * u) * (1 + u) = (a_0 - a_1) + (a_0 + a_1) * u. */
				dv_copy(t[0], a[1], 2 * FP_DIGS);
				fp_addc_low(c[1], a[0], a[1]);
				fp_subc_low(c[0], a[0], t[0]);
				break;
			case 5:
				/* If p = 5 mod 8, (u) is a QNR. */
				dv_copy(t[0], a[0], 2 * FP_DIGS);
				dv_zero(t[1], FP_DIGS);
				dv_copy(t[1] + FP_DIGS, fp_prime_get(), FP_DIGS);
				fp_subc_low(c[0], t[1], a[1]);
				for (int i = -1; i > fp_prime_get_qnr(); i--) {
					fp_subc_low(c[0], c[0], a[1]);
				}
				dv_copy(c[1], t[0], 2 * FP_DIGS);
				break;
			case 7:
				/* If p = 7 mod 8, (2^lg_4(b-1) + u) is a QNR/CNR. */
				/* (a_0 + a_1 * u) * (2^lg_4(b-1) + u) =
				 * (2^lg_4(b-1) * a_0 - a_1) + (a_0 + 2^lg_4(b-1) * a_1) * u. */
				fp2_addc_low(t, a, a);
				fp_prime_back(b, ep_curve_get_b());
				for (int i = 1; i < bn_bits(b) / 2; i++) {
					fp2_addc_low(t, t, t);
				}
				fp_subc_low(c[0], t[0], a[1]);
				fp_addc_low(c[1], t[1], a[0]);
				break;
			default:
				THROW(ERR_NO_VALID);
				break;
		}
#endif
	}
	CATCH_ANY {
		THROW(ERR_CAUGHT);
	}
	FINALLY {
		dv2_free(t);
		bn_free(b);
	}
}
Example #8
void fp3_sqr_basic(fp3_t c, fp3_t a) {
	dv_t t0, t1, t2, t3, t4, t5;

	dv_null(t0);
	dv_null(t1);
	dv_null(t2);
	dv_null(t3);
	dv_null(t4);
	dv_null(t5);

	TRY {
		dv_new(t0);
		dv_new(t1);
		dv_new(t2);
		dv_new(t3);
		dv_new(t4);
		dv_new(t5);

		/* t0 = a_0^2. */
		fp_sqrn_low(t0, a[0]);

		/* t1 = 2 * a_1 * a_2. */
		fp_dbl(t2, a[1]);
		fp_muln_low(t1, t2, a[2]);

		/* t2 = a_2^2. */
		fp_sqrn_low(t2, a[2]);

		/* t3 = (a_0 + a_2 + a_1)^2, t4 = (a_0 + a_2 - a_1)^2. */
		fp_add(t3, a[0], a[2]);
		fp_add(t4, t3, a[1]);
		fp_sub(t5, t3, a[1]);
		fp_sqrn_low(t3, t4);
		fp_sqrn_low(t4, t5);

		/* t4 = (t4 + t3)/2. */
		fp_addd_low(t4, t4, t3);
		fp_hlvd_low(t4, t4);

		/* t3 = t3 - t4 - t1. */
		fp_addc_low(t5, t1, t4);
		fp_subc_low(t3, t3, t5);

		/* c_2 = t4 - t0 - t2. */
		fp_addc_low(t5, t0, t2);
		fp_subc_low(t4, t4, t5);
		fp_rdc(c[2], t4);

		/* c_0 = t0 + t1 * B. */
		fp_subc_low(t0, t0, t1);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_subc_low(t0, t0, t1);
		}
		fp_rdc(c[0], t0);

		/* c_1 = t3 + t2 * B. */
		fp_subc_low(t3, t3, t2);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_subc_low(t3, t3, t2);
		}
		fp_rdc(c[1], t3);
	}
	CATCH_ANY {
		THROW(ERR_CAUGHT);
	}
	FINALLY {
		dv_free(t0);
		dv_free(t1);
		dv_free(t2);
		dv_free(t3);
		dv_free(t4);
		dv_free(t5);
	}
}
Example #9
void fp2_sqrn_low(dv2_t c, fp2_t a) {
	align dig_t t0[2 * FP_DIGS], t1[2 * FP_DIGS], t2[2 * FP_DIGS];

	/* t0 = (a0 + a1). */
#ifdef FP_SPACE
	/* if we have room for carries, we can avoid reductions here. */
	fp_addn_low(t0, a[0], a[1]);
#else
	fp_addm_low(t0, a[0], a[1]);
#endif
	/* t1 = (a0 - a1). */
	fp_subm_low(t1, a[0], a[1]);

#ifdef FP_QNRES

#ifdef FP_SPACE
	fp_dbln_low(t2, a[0]);
#else
	fp_dblm_low(t2, a[0]);
#endif
	/* c1 = 2 * a0 * a1. */
	fp_muln_low(c[1], t2, a[1]);
	/* c_0 = a_0^2 + a_1^2 * u^2. */
	fp_muln_low(c[0], t0, t1);

#else /* !FP_QNRES */

	/* t1 = a0 + u^2 * a1. */
	for (int i = -1; i > fp_prime_get_qnr(); i--) {
		fp_subm_low(t1, t1, a[1]);
	}

	if (fp_prime_get_qnr() == -1) {
		/* t2 = 2 * a0. */
		fp_dbl(t2, a[0]);
		/* c1 = 2 * a0 * a1. */
		fp_muln_low(c[1], t2, a[1]);
		/* c0 = a0^2 + u^2 * a1^2. */
		fp_muln_low(c[0], t0, t1);
	} else {
		/* c1 = a0 * a1. */
		fp_muln_low(c[1], a[0], a[1]);
		/* c0 = (a0 + a1) * (a0 + u^2 * a1); the cross term is removed below. */
		fp_muln_low(c[0], t0, t1);

#ifdef FP_SPACE
		for (int i = -1; i > fp_prime_get_qnr(); i--) {
			fp_addd_low(c[0], c[0], c[1]);
		}
		/* c1 = 2 * a0 * a1. */
		fp_addd_low(c[1], c[1], c[1]);
#else
		for (int i = -1; i > fp_prime_get_qnr(); i--) {
			fp_addc_low(c[0], c[0], c[1]);
		}
		/* c1 = 2 * a0 * a1. */
		fp_addc_low(c[1], c[1], c[1]);
#endif
	}
#endif
	/* c = c0 + c1 * u. */
}
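
When u^2 = -1 (the FP_QNRES build, or fp_prime_get_qnr() == -1 at runtime), squaring uses the complex method: c_0 = (a_0 + a_1)*(a_0 - a_1) and c_1 = 2*a_0*a_1, i.e. only two base-field products. A minimal sketch (not RELIC code) over a small illustrative prime:

/* Toy check of the complex-method squaring in F_p[u]/(u^2 + 1). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P 19u /* illustrative prime, with u^2 = -1 */

int main(void) {
	for (uint32_t a0 = 0; a0 < P; a0++) {
		for (uint32_t a1 = 0; a1 < P; a1++) {
			/* Schoolbook: a0^2 - a1^2 and 2*a0*a1. */
			uint32_t s0 = (a0 * a0 + (P - 1) * (a1 * a1 % P)) % P;
			uint32_t s1 = 2 * a0 * a1 % P;
			/* Complex method, as in the u^2 = -1 branches above. */
			uint32_t c0 = (a0 + a1) * (a0 + P - a1) % P;
			uint32_t c1 = 2 * a0 * a1 % P;
			assert(c0 == s0 && c1 == s1);
		}
	}
	puts("fp2 complex squaring: ok");
	return 0;
}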
Example #10
void pp_dbl_k2_projc_lazyr(fp2_t l, ep_t r, ep_t p, ep_t q) {
	fp_t t0, t1, t2, t3, t4, t5;
	dv_t u0, u1;

	fp_null(t0);
	fp_null(t1);
	fp_null(t2);
	fp_null(t3);
	fp_null(t4);
	fp_null(t5);
	dv_null(u0);
	dv_null(u1);

	TRY {
		fp_new(t0);
		fp_new(t1);
		fp_new(t2);
		fp_new(t3);
		fp_new(t4);
		fp_new(t5);
		dv_new(u0);
		dv_new(u1);

		/* For these curves, we can always choose a = -3. */
		/* dbl-2001-b formulas: 3M + 5S + 8add + 1*4 + 2*8 + 1*3 */
		/* http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b */

		/* t0 = delta = z1^2. */
		fp_sqr(t0, p->z);

		/* t1 = gamma = y1^2. */
		fp_sqr(t1, p->y);

		/* t2 = beta = x1 * y1^2. */
		fp_mul(t2, p->x, t1);

		/* t3 = alpha = 3 * (x1 - z1^2) * (x1 + z1^2). */
		fp_sub(t3, p->x, t0);
		fp_add(t4, p->x, t0);
		fp_mul(t4, t3, t4);
		fp_dbl(t3, t4);
		fp_add(t3, t3, t4);

		/* t2 = 4 * beta. */
		fp_dbl(t2, t2);
		fp_dbl(t2, t2);

		/* z3 = (y1 + z1)^2 - gamma - delta. */
		fp_add(r->z, p->y, p->z);
		fp_sqr(r->z, r->z);
		fp_sub(r->z, r->z, t1);
		fp_sub(r->z, r->z, t0);

		/* l0 = 2 * gamma - alpha * (delta * xq + x1). */
		fp_dbl(t1, t1);
		fp_mul(t5, t0, q->x);
		fp_add(t5, t5, p->x);
		fp_mul(t5, t5, t3);
		fp_sub(l[0], t1, t5);

		/* x3 = alpha^2 - 8 * beta. */
		fp_dbl(t5, t2);
		fp_sqr(r->x, t3);
		fp_sub(r->x, r->x, t5);

		/* y3 = alpha * (4 * beta - x3) - 8 * gamma^2. */
		fp_sqrn_low(u0, t1);
		fp_addc_low(u0, u0, u0);
		fp_subm_low(r->y, t2, r->x);
		fp_muln_low(u1, r->y, t3);
		fp_subc_low(u1, u1, u0);
		fp_rdcn_low(r->y, u1);

		/* l1 = - z3 * delta * yq. */
		fp_mul(l[1], r->z, t0);
		fp_mul(l[1], l[1], q->y);

		r->norm = 0;
	}
	CATCH_ANY {
		THROW(ERR_CAUGHT);
	}
	FINALLY {
		fp_free(t0);
		fp_free(t1);
		fp_free(t2);
		fp_free(t3);
		fp_free(t4);
		fp_free(t5);
		dv_free(u0);
		dv_free(u1);
	}
}
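
The doubling above follows the dbl-2001-b formulas for a = -3 referenced in the comment. The following is a minimal, self-contained sketch (not RELIC code) that applies the same formulas over a small illustrative prime and checks the result against plain affine doubling; the prime p = 10007, the curve y^2 = x^3 - 3x + 7 and the starting point (2, 3) are assumptions chosen only for this check.

/* Toy check of the dbl-2001-b Jacobian doubling formulas (a = -3). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P 10007u /* illustrative prime */

static uint64_t powmod(uint64_t b, uint64_t e) {
	uint64_t r = 1;
	b %= P;
	while (e) {
		if (e & 1) r = r * b % P;
		b = b * b % P;
		e >>= 1;
	}
	return r;
}

static uint64_t inv(uint64_t x) { return powmod(x, P - 2); } /* Fermat */

int main(void) {
	/* Affine point (2, 3) on y^2 = x^3 - 3x + 7, and a Jacobian copy with z = 5. */
	uint64_t x = 2, y = 3, z = 5;
	uint64_t X = x * z % P * z % P, Y = y * z % P * z % P * z % P, Z = z;

	/* Affine doubling: lambda = (3x^2 - 3) / (2y). */
	uint64_t lam = (3 * x % P * x % P + P - 3) % P * inv(2 * y % P) % P;
	uint64_t x3 = (lam * lam % P + 2 * P - 2 * x % P) % P;
	uint64_t y3 = (lam * ((x + P - x3) % P) % P + P - y) % P;

	/* dbl-2001-b, the same sequence as pp_dbl_k2_projc_lazyr. */
	uint64_t delta = Z * Z % P;
	uint64_t gamma = Y * Y % P;
	uint64_t beta = X * gamma % P;
	uint64_t alpha = 3 * ((X + P - delta) % P) % P * ((X + delta) % P) % P;
	uint64_t X3 = (alpha * alpha % P + 8 * P - 8 * beta % P) % P;
	uint64_t Z3 = ((Y + Z) * (Y + Z) % P + 2 * P - gamma - delta) % P;
	uint64_t Y3 = (alpha * ((4 * beta % P + P - X3) % P) % P
			+ P - 8 * gamma % P * gamma % P) % P;

	/* Back to affine coordinates: (X3/Z3^2, Y3/Z3^3). */
	uint64_t iz2 = inv(Z3 * Z3 % P);
	assert(X3 * iz2 % P == x3);
	assert(Y3 * iz2 % P * inv(Z3) % P == y3);
	puts("dbl-2001-b doubling: ok");
	return 0;
}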
Example #11
void fp3_mul_basic(fp3_t c, fp3_t a, fp3_t b) {
	dv_t t, t0, t1, t2, t3, t4, t5, t6;

	dv_null(t);
	dv_null(t0);
	dv_null(t1);
	dv_null(t2);
	dv_null(t3);
	dv_null(t4);
	dv_null(t5);
	dv_null(t6);

	TRY {
		dv_new(t);
		dv_new(t0);
		dv_new(t1);
		dv_new(t2);
		dv_new(t3);
		dv_new(t4);
		dv_new(t5);
		dv_new(t6);

		/* Karatsuba algorithm. */

		/* t0 = a_0 * b_0, t1 = a_1 * b_1, t2 = a_2 * b_2. */
		fp_muln_low(t0, a[0], b[0]);
		fp_muln_low(t1, a[1], b[1]);
		fp_muln_low(t2, a[2], b[2]);

		/* t3 = (a_1 + a_2) * (b_1 + b_2). */
		fp_add(t3, a[1], a[2]);
		fp_add(t4, b[1], b[2]);
		fp_muln_low(t, t3, t4);
		fp_addd_low(t6, t1, t2);
		fp_subc_low(t4, t, t6);
		fp_subc_low(t3, t0, t4);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_subc_low(t3, t3, t4);
		}

		fp_add(t4, a[0], a[1]);
		fp_add(t5, b[0], b[1]);
		fp_muln_low(t, t4, t5);
		fp_addd_low(t4, t0, t1);
		fp_subc_low(t4, t, t4);
		fp_subc_low(t4, t4, t2);
		for (int i = -1; i > fp_prime_get_cnr(); i--) {
			fp_subc_low(t4, t4, t2);
		}

		fp_add(t5, a[0], a[2]);
		fp_add(t6, b[0], b[2]);
		fp_muln_low(t, t5, t6);
		fp_addd_low(t6, t0, t2);
		fp_subc_low(t5, t, t6);
		fp_addc_low(t5, t5, t1);

		/* c_0 = t3 mod p. */
		fp_rdc(c[0], t3);

		/* c_1 = t4 mod p. */
		fp_rdc(c[1], t4);

		/* c_2 = t5 mod p. */
		fp_rdc(c[2], t5);
	}
	CATCH_ANY {
		THROW(ERR_CAUGHT);
	}
	FINALLY {
		dv_free(t);
		dv_free(t0);
		dv_free(t1);
		dv_free(t2);
		dv_free(t3);
		dv_free(t4);
		dv_free(t5);
		dv_free(t6);
	}
}