/* Scalar Montgomery multiplication for P-384: r = a * b * R^-1 (mod n),
 * where the modulus is referenced through the file-level constant N.
 * XXX: Inefficient. TODO: Add dedicated multiplication routine. */
void GFp_p384_scalar_mul_mont(ScalarMont r, const ScalarMont a,
                              const ScalarMont b) {
  /* Precomputed n0 value used by the Montgomery reduction step. */
  static const BN_ULONG kNN0[] = {
    BN_MONT_CTX_N0(0x6ed46089, 0xe88fdc45),
  };
  bn_mul_mont(r, a, b, N, kNN0, P384_LIMBS);
}
/* Field-element Montgomery multiplication: r = a * b * R^-1 (mod q), where
 * the modulus is referenced through the file-level constant Q.
 * XXX: Not (clearly) constant-time; inefficient. TODO: Add a dedicated
 * squaring routine. */
static inline void elem_mul_mont(Elem r, const Elem a, const Elem b) {
  /* Precomputed n0 value used by the Montgomery reduction step. */
  static const BN_ULONG kQN0[] = {
    BN_MONT_CTX_N0(0x1, 0x1),
  };
  bn_mul_mont(r, a, b, Q, kQN0, P384_LIMBS);
}
/* Debug/verification wrapper around bn_mul_mont.
 *
 * Runs three independent Montgomery-multiplication implementations (the
 * optimized bn_mul_mont, a C reference, and an SSE routine) on the same
 * inputs and cross-compares their outputs, then performs the real
 * multiplication into rp and returns its result.
 *
 * The dead `ap = ap;` statements are deliberate breakpoint anchors: set a
 * breakpoint on them to catch a mismatch in a debugger. They are kept
 * as-is; do not "clean them up".
 *
 * Fixes vs. the previous version:
 *  - memcmp length was `4 * num`, which hard-coded a 4-byte BN_ULONG and
 *    compared only half of each buffer on platforms with 64-bit limbs; it
 *    now uses `num * sizeof(BN_ULONG)`.
 *  - the 100-limb scratch buffers were written without any bound on num,
 *    overflowing the stack for num > 100; the cross-check is now skipped
 *    (and only the real multiplication performed) when num is out of range.
 */
int My_bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                   const BN_ULONG *np, const BN_ULONG *n0, int num) {
  enum { SCRATCH_LIMBS = 100 };
  BN_ULONG buf_OSL[SCRATCH_LIMBS];
  BN_ULONG buf_SSE[SCRATCH_LIMBS];
  BN_ULONG buf_C[SCRATCH_LIMBS];

  if (num > 0 && num <= SCRATCH_LIMBS) {
    size_t nbytes = (size_t)num * sizeof(BN_ULONG);
    bn_mul_mont(buf_OSL, ap, bp, np, n0, num);
    C_bn_mul_mont(buf_C, ap, bp, np, n0, num);
    ((PFN_bn_mul_mont)&MontgomeryMul32_SSE)(buf_SSE, ap, bp, np, n0, num);
    if (memcmp(buf_SSE, buf_C, nbytes))
      ap = ap; /* breakpoint anchor: SSE vs C mismatch */
    if (memcmp(buf_C, buf_OSL, nbytes))
      ap = ap; /* breakpoint anchor: C vs optimized mismatch */
    if (memcmp(buf_OSL, buf_SSE, nbytes))
      ap = ap; /* breakpoint anchor: optimized vs SSE mismatch */
  }

  return bn_mul_mont(rp, ap, bp, np, n0, num);
}
/* Scalar Montgomery multiplication for P-256: r = a * b * R^-1 (mod n). */
void GFp_p256_scalar_mul_mont(ScalarMont r, const ScalarMont a,
                              const ScalarMont b) {
  /* The P-256 scalar modulus, least-significant limb first. */
  static const BN_ULONG kN[] = {
    TOBN(0xf3b9cac2, 0xfc632551),
    TOBN(0xbce6faad, 0xa7179e84),
    TOBN(0xffffffff, 0xffffffff),
    TOBN(0xffffffff, 0x00000000),
  };
  /* Precomputed n0 value used by the Montgomery reduction step. */
  static const BN_ULONG kNN0[] = {
    BN_MONT_CTX_N0(0xccd1c8aa, 0xee00bc4f),
  };
  /* XXX: Inefficient. TODO: optimize with dedicated multiplication routine. */
  bn_mul_mont(r, a, b, kN, kNN0, P256_LIMBS);
}
/*
 * Montgomery multiplication r = a * b * R^-1 (mod N) that leaves r with a
 * fixed (non-normalized) top, marked via BN_FLG_FIXED_TOP on the assembly
 * fast path. Returns 1 on success, 0 on failure.
 */
int bn_mul_mont_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                          BN_MONT_CTX *mont, BN_CTX *ctx)
{
    BIGNUM *tmp;
    int ret = 0;
    int num = mont->N.top;

#if defined(OPENSSL_BN_ASM_MONT) && defined(MONT_WORD)
    /* Fast path: hand off to the assembly routine when both operands are
     * exactly num limbs wide. bn_mul_mont may itself decline (return 0),
     * in which case we fall through to the generic path below. */
    if (num > 1 && a->top == num && b->top == num) {
        if (bn_wexpand(r, num) == NULL)
            return (0);
        if (bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) {
            r->neg = a->neg ^ b->neg;
            r->top = num;
            r->flags |= BN_FLG_FIXED_TOP;
            return (1);
        }
    }
#endif

    /* The product must fit in 2*num limbs for the reduction below. */
    if ((a->top + b->top) > 2 * num)
        return 0;

    BN_CTX_start(ctx);
    tmp = BN_CTX_get(ctx);
    if (tmp == NULL)
        goto err;

    bn_check_top(tmp);
    /* Use squaring when multiplying a value by itself. */
    if (a == b) {
        if (!BN_sqr(tmp, a, ctx))
            goto err;
    } else {
        if (!BN_mul(tmp, a, b, ctx))
            goto err;
    }
    /* reduce from aRR to aR */
#ifdef MONT_WORD
    if (!bn_from_montgomery_word(r, tmp, mont))
        goto err;
#else
    if (!BN_from_montgomery(r, tmp, mont, ctx))
        goto err;
#endif
    ret = 1;
 err:
    BN_CTX_end(ctx);
    return (ret);
}
/*
 * Montgomery multiplication r = a * b * R^-1 (mod N). Returns 1 on success,
 * 0 on failure. r is normalized via bn_correct_top on the fast path.
 */
int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                          const BN_MONT_CTX *mont, BN_CTX *ctx) {
  BIGNUM *tmp;
  int ret = 0;

#if defined(OPENSSL_BN_ASM_MONT)
  /* Fast path: use the assembly routine when both operands are exactly
   * num limbs wide. bn_mul_mont may decline (return 0), in which case we
   * fall through to the generic multiply-then-reduce path. */
  int num = mont->N.top;
  if (num > 1 && a->top == num && b->top == num) {
    if (bn_wexpand(r, num) == NULL) {
      return 0;
    }
    if (bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) {
      r->neg = a->neg ^ b->neg;
      r->top = num;
      bn_correct_top(r);
      return 1;
    }
  }
#endif

  BN_CTX_start(ctx);
  tmp = BN_CTX_get(ctx);
  if (tmp == NULL) {
    goto err;
  }

  /* Use squaring when multiplying a value by itself. */
  if (a == b) {
    if (!BN_sqr(tmp, a, ctx)) {
      goto err;
    }
  } else {
    if (!BN_mul(tmp, a, b, ctx)) {
      goto err;
    }
  }

  /* reduce from aRR to aR */
  if (!BN_from_montgomery_word(r, tmp, mont)) {
    goto err;
  }
  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}
/*
 * Montgomery multiplication r = a * b * R^-1 (mod N). Returns 1 on success,
 * 0 on failure (allocation or arithmetic error).
 */
int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                          const BN_MONT_CTX *mont, BN_CTX *ctx) {
  int num = mont->N.top;

  /* Fast path: single fused multiply-and-reduce when both operands are
   * exactly num limbs wide.
   * bn_mul_mont requires at least four limbs, at least for x86. */
  if (num >= 4 && a->top == num && b->top == num) {
    if (bn_wexpand(r, num) == NULL) {
      return 0;
    }
    bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num);
    r->neg = a->neg ^ b->neg;
    r->top = num;
    bn_correct_top(r);
    return 1;
  }

  /* Slow path: full-width product followed by a separate Montgomery
   * reduction. */
  int ok = 0;
  BN_CTX_start(ctx);
  BIGNUM *product = BN_CTX_get(ctx);
  if (product == NULL) {
    goto done;
  }

  /* Use squaring when multiplying a value by itself. */
  if (a == b ? !BN_sqr(product, a, ctx) : !BN_mul(product, a, b, ctx)) {
    goto done;
  }

  /* reduce from aRR to aR */
  if (!BN_from_montgomery_word(r, product, mont)) {
    goto done;
  }
  ok = 1;

done:
  BN_CTX_end(ctx);
  return ok;
}