/* Logarithm of the gamma function, y = log(gamma(x)), via the Stirling
   series after upward argument reduction:
   log(gamma(x)) = log(gamma(x+r)) - log(rf(x,r)),
   where rf(x,r) = x (x+1) ... (x+r-1) is the rising factorial. */
void acb_lgamma(acb_t y, const acb_t x, long prec)
{
    int reflect;
    long r, n, wp;
    acb_t t, u;

    /* working precision: guard bits proportional to log2(prec) */
    wp = prec + FLINT_BIT_COUNT(prec);

    /* reflection disabled for the logarithmic variant (third flag 0) */
    acb_gamma_stirling_choose_param(&reflect, &r, &n, x, 0, 0, wp);

    /* log(gamma(x)) = log(gamma(x+r)) - log(rf(x,r)) */
    acb_init(t);
    acb_init(u);

    acb_add_ui(t, x, r, wp);
    acb_gamma_stirling_eval(u, t, n, 0, wp);

    /* NOTE(review): the rising factorial and its log are evaluated at
       prec rather than wp, so the guard bits added above are not used
       here -- confirm this is intentional */
    acb_rising_ui_rec(t, x, r, prec);
    acb_log(t, t, prec);
    /* fix up the branch of the logarithm so the subtraction below is
       continuous across the branch cut */
    _acb_log_rising_correct_branch(t, t, x, r, wp);

    acb_sub(y, u, t, prec);

    acb_clear(t);
    acb_clear(u);
}
char * nmod_poly_get_str(const nmod_poly_t poly) { long i; char * buf, * ptr; /* estimate for the length, n and three spaces */ #if FLINT64 long size = 21*2 + 1; #else long size = 11*2 + 1; #endif for (i = 0; i < poly->length; i++) { if (poly->coeffs[i]) /* log(2)/log(10) < 0.30103, +1 for space/null */ size += (ulong) ceil(0.30103*FLINT_BIT_COUNT(poly->coeffs[i])) + 1; else size += 2; } buf = (char *) flint_malloc(size); ptr = buf + sprintf(buf, "%ld %lu", poly->length, poly->mod.n); if (poly->length) ptr += sprintf(ptr, " "); for (i = 0; i < poly->length; i++) ptr += sprintf(ptr, " %lu", poly->coeffs[i]); return buf; }
/* Set res to the primorial of n, the product of all primes <= n.
   Small n are served from the single-limb table ULONG_PRIMORIALS;
   larger n multiply the prime table with mpn_prod_limbs directly
   into a promoted mpz. */
void fmpz_primorial(fmpz_t res, long n)
{
    mp_size_t len, pi;
    ulong bits;
    __mpz_struct * mpz_ptr;

    if (n <= LARGEST_ULONG_PRIMORIAL)
    {
        if (n <= 2)
            fmpz_set_ui(res, 1 + (n==2));   /* empty product is 1; primorial(2) = 2 */
        else
            /* table is indexed so that n = 3,5,7,... map to 0,1,2,...;
               even n share the value of the preceding odd n */
            fmpz_set_ui(res, ULONG_PRIMORIALS[(n-1)/2-1]);
        return;
    }

    pi = n_prime_pi(n);        /* number of primes <= n */
    n_compute_primes(pi);      /* ensure flint_primes holds at least pi primes */
    bits = FLINT_BIT_COUNT(flint_primes[pi - 1]);

    mpz_ptr = _fmpz_promote(res);
    /* pi primes of at most `bits` bits each: pi*bits bits suffice */
    mpz_realloc2(mpz_ptr, pi*bits);

    len = mpn_prod_limbs(mpz_ptr->_mp_d, flint_primes, pi, bits);
    mpz_ptr->_mp_size = len;
}
/* Rising factorial y = x (x+1) ... (x+n-1) for rational x, computed by
   binary splitting over the numerator/denominator representation. */
void arb_rising_fmpq_ui(arb_t y, const fmpq_t x, ulong n, long prec)
{
    long wp;

    /* trivial cases: empty product and single factor */
    if (n == 0)
    {
        arb_one(y);
        return;
    }

    if (n == 1)
    {
        arb_set_fmpq(y, x, prec);
        return;
    }

    /* guard bits proportional to log2(n) */
    wp = ARF_PREC_ADD(prec, FLINT_BIT_COUNT(n));

    bsplit(y, fmpq_numref(x), fmpq_denref(x), 0, n, wp);

    if (fmpz_is_one(fmpq_denref(x)))
    {
        arb_set_round(y, y, prec);
    }
    else
    {
        /* divide out the accumulated power of the denominator */
        arb_t den;
        arb_init(den);
        arb_set_fmpz(den, fmpq_denref(x));
        arb_pow_ui(den, den, n, wp);
        arb_div(y, y, den, prec);
        arb_clear(den);
    }
}
/* Compute bounds lo <= pi(n) <= hi on the prime counting function from
   pi(n) ~ n/log(n), with FLINT_BIT_COUNT(n) standing in for
   log2(n) + 1.
   NOTE(review): for n < 2, lg2 - 1 is 0 and the *hi computation divides
   by zero (and n = 0 makes *lo divide by zero as well) -- confirm that
   callers only pass n large enough for these bounds to make sense. */
void n_prime_pi_bounds(ulong *lo, ulong *hi, mp_limb_t n)
{
    int lg2 = FLINT_BIT_COUNT(n);
    /* 0.6931472 ~ log(2), so lg2*log(2) overestimates log(n): lower bound */
    *lo = (ulong)(((double)n)/((double)lg2*0.6931472));
    /* NOTE(review): 0.5522821 is presumably chosen so this quotient
       upper-bounds pi(n) -- verify against the accompanying analysis */
    *hi = (ulong)(((double)n)/((double)(lg2-1)*0.5522821));
}
/* Fibonacci (Lucas-V) probable prime test.  Returns 1 if n passes the
   test (n is prime or a Fibonacci pseudoprime), 0 if n is definitely
   composite. */
int n_is_probabprime_fibonacci(mp_limb_t n)
{
    mp_limb_t m;
    n_pair_t V;

    /* tiny inputs: 2 and 3 pass; 0 and 1 fail */
    if (FLINT_ABS((mp_limb_signed_t) n) <= 3UL)
    {
        if (n >= 2UL)
            return 1;
        return 0;
    }

    /* m = (n - (5/n)) / 2, with (5/n) the Jacobi symbol */
    m = (n - n_jacobi(5L, n)) / 2;  /* cannot overflow as (5/n) = 0 for n = 2^64-1 */

    if (FLINT_BIT_COUNT(n) <= FLINT_D_BITS)
    {
        /* modulus fits the double-precision precomputed-inverse mulmod */
        double npre = n_precompute_inverse(n);

        V = fchain_precomp(m, n, npre);
        return (n_mulmod_precomp(n - 3UL, V.x, n, npre) ==
                n_mulmod_precomp(2UL, V.y, n, npre));
    }
    else
    {
        /* full-width modulus: use the preinverted-limb variants */
        mp_limb_t ninv = n_preinvert_limb(n);

        V = fchain2_preinv(m, n, ninv);
        return (n_mulmod2_preinv(n - 3UL, V.x, n, ninv) ==
                n_mulmod2_preinv(2UL, V.y, n, ninv));
    }
}
int main(void) { int i, result; flint_rand_t state; flint_randinit(state); printf("bit_pack/bit_unpack...."); fflush(stdout); /* Check aliasing of a and c */ for (i = 0; i < 10000; i++) { nmod_poly_t a, b; mp_limb_t n; ulong bits; mp_ptr mpn; do { n = n_randtest_not_zero(state); } while (n == 1); bits = 2 * FLINT_BIT_COUNT(n) + n_randint(state, FLINT_BITS); nmod_poly_init(a, n); nmod_poly_init(b, n); do { nmod_poly_randtest(a, state, n_randint(state, 100)); } while (a->length == 0); mpn = malloc(sizeof(mp_limb_t) * ((bits * a->length - 1) / FLINT_BITS + 1)); _nmod_poly_bit_pack(mpn, a->coeffs, a->length, bits); nmod_poly_fit_length(b, a->length); _nmod_poly_bit_unpack(b->coeffs, a->length, mpn, bits, a->mod); b->length = a->length; result = (nmod_poly_equal(a, b)); if (!result) { printf("FAIL:\n"); nmod_poly_print(a), printf("\n\n"); nmod_poly_print(b), printf("\n\n"); abort(); } nmod_poly_clear(a); nmod_poly_clear(b); } flint_randclear(state); printf("PASS\n"); return 0; }
/* Assumes poly1 and poly2 are not length 0 and 0 < trunc <= len1 + len2 - 1 */
/* Classical (schoolbook) truncated product res = poly1 * poly2 mod x^trunc.
   When the unreduced accumulators fit in one limb, rows are accumulated
   with raw mpn calls and reduced mod n only once at the end; otherwise
   every row is reduced as it is added. */
void _nmod_poly_mullow_classical(mp_ptr res, mp_srcptr poly1, slong len1,
                                 mp_srcptr poly2, slong len2, slong trunc,
                                 nmod_t mod)
{
    if (len1 == 1 || trunc == 1) /* Special case if the length of output is 1 */
    {
        res[0] = n_mulmod2_preinv(poly1[0], poly2[0], mod.n, mod.ninv);
    }
    else /* Ordinary case */
    {
        slong i;

        slong bits = FLINT_BITS - (slong) mod.norm;   /* bits per input coefficient */
        slong log_len = FLINT_BIT_COUNT(len2);        /* log2 of max terms summed */

        if (2 * bits + log_len <= FLINT_BITS)
        {
            /* unreduced sums of products fit in a single limb */

            /* Set res[i] = poly1[i]*poly2[0] */
            mpn_mul_1(res, poly1, FLINT_MIN(len1, trunc), poly2[0]);

            if (len2 != 1)
            {
                /* Set res[i+len1-1] = in1[len1-1]*in2[i] */
                if (trunc > len1)
                    mpn_mul_1(res + len1, poly2 + 1, trunc - len1,
                              poly1[len1 - 1]);

                /* out[i+j] += in1[i]*in2[j] */
                for (i = 0; i < FLINT_MIN(len1, trunc) - 1; i++)
                    mpn_addmul_1(res + i + 1, poly2 + 1,
                                 FLINT_MIN(len2, trunc - i) - 1, poly1[i]);
            }

            /* single final reduction of all accumulated sums */
            _nmod_vec_reduce(res, res, trunc, mod);
        }
        else
        {
            /* accumulators could overflow a limb: reduce as we go */

            /* Set res[i] = poly1[i]*poly2[0] */
            _nmod_vec_scalar_mul_nmod(res, poly1, FLINT_MIN(len1, trunc),
                                      poly2[0], mod);

            if (len2 == 1)
                return;

            /* Set res[i+len1-1] = in1[len1-1]*in2[i] */
            if (trunc > len1)
                _nmod_vec_scalar_mul_nmod(res + len1, poly2 + 1,
                                          trunc - len1, poly1[len1 - 1], mod);

            /* out[i+j] += in1[i]*in2[j] */
            for (i = 0; i < FLINT_MIN(len1, trunc) - 1; i++)
                _nmod_vec_scalar_addmul_nmod(res + i + 1, poly2 + 1,
                                             FLINT_MIN(len2, trunc - i) - 1,
                                             poly1[i], mod);
        }
    }
}
/* Test program for arb_zeta_ui_euler_product: checks containment of
   MPFR's zeta value in the computed ball, and that the ball's relative
   accuracy is close to the requested precision. */
int main()
{
    slong iter;
    flint_rand_t state;

    flint_printf("zeta_ui_euler_product....");
    fflush(stdout);

    flint_randinit(state);

    for (iter = 0; iter < 10000 * arb_test_multiplier(); iter++)
    {
        arb_t r;
        ulong n;
        mpfr_t s;
        slong prec, accuracy;

        /* the Euler product code path is only used for n >= 6 */
        do {
            n = n_randint(state, 1 << n_randint(state, 10));
        } while (n < 6);

        prec = 2 + n_randint(state, n * FLINT_BIT_COUNT(n));

        arb_init(r);
        mpfr_init2(s, prec + 100);   /* reference value with extra precision */

        arb_zeta_ui_euler_product(r, n, prec);
        mpfr_zeta_ui(s, n, MPFR_RNDN);

        /* the computed ball must contain the accurate reference value */
        if (!arb_contains_mpfr(r, s))
        {
            flint_printf("FAIL: containment\n\n");
            flint_printf("n = %wu\n\n", n);
            flint_printf("r = "); arb_printd(r, prec / 3.33); flint_printf("\n\n");
            flint_printf("s = "); mpfr_printf("%.275Rf\n", s); flint_printf("\n\n");
            flint_abort();
        }

        accuracy = arb_rel_accuracy_bits(r);

        /* allow a few bits of slack below the requested precision */
        if (accuracy < prec - 4)
        {
            flint_printf("FAIL: accuracy = %wd, prec = %wd\n\n", accuracy, prec);
            flint_printf("n = %wu\n\n", n);
            flint_printf("r = "); arb_printd(r, prec / 3.33); flint_printf("\n\n");
            flint_abort();
        }

        arb_clear(r);
        mpfr_clear(s);
    }

    flint_randclear(state);
    flint_cleanup();
    flint_printf("PASS\n");
    return EXIT_SUCCESS;
}
/* Extended GCD dispatcher: small inputs use the Euclidean algorithm,
   larger ones the asymptotically fast half-GCD. */
long _nmod_poly_xgcd(mp_ptr G, mp_ptr S, mp_ptr T,
                     mp_srcptr A, long lenA, mp_srcptr B, long lenB,
                     nmod_t mod)
{
    long cutoff;

    /* small moduli switch over to HGCD at a lower threshold */
    if (FLINT_BIT_COUNT(mod.n) <= 8)
        cutoff = NMOD_POLY_SMALL_GCD_CUTOFF;
    else
        cutoff = NMOD_POLY_GCD_CUTOFF;

    if (lenA < cutoff)
        return _nmod_poly_xgcd_euclidean(G, S, T, A, lenA, B, lenB, mod);

    return _nmod_poly_xgcd_hgcd(G, S, T, A, lenA, B, lenB, mod);
}
/* Compute the Euler number E_n exactly: odd-index values are zero,
   small even ones come from a table, and larger ones are obtained by
   evaluating n! * 2^(n+2) / (pi^(n+1) * L-series factor) to enough
   precision to round to the exact integer, then applying the sign. */
void _arith_euler_number_zeta(fmpz_t res, ulong n)
{
    mpz_t r;
    mpfr_t t, z, pi;
    mp_bitcnt_t prec, pi_prec;

    /* E_n = 0 for all odd n */
    if (n % 2)
    {
        fmpz_zero(res);
        return;
    }

    /* table lookup for small n; signs alternate with period 4 */
    if (n < SMALL_EULER_LIMIT)
    {
        fmpz_set_ui(res, euler_number_small[n / 2]);
        if (n % 4 == 2)
            fmpz_neg(res, res);
        return;
    }

    /* enough precision to round the float result to the exact integer */
    prec = arith_euler_number_size(n) + 10;
    pi_prec = prec + FLINT_BIT_COUNT(n);

    mpz_init(r);
    mpfr_init2(t, prec);
    mpfr_init2(z, prec);
    mpfr_init2(pi, pi_prec);

    /* t = n! * 2^(n+2) */
    flint_mpz_fac_ui(r, n);
    mpfr_set_z(t, r, GMP_RNDN);
    mpfr_mul_2exp(t, t, n + 2, GMP_RNDN);

    /* pi^(n + 1) * L(n+1) */
    mpfr_zeta_inv_euler_product(z, n + 1, 1);
    mpfr_const_pi(pi, GMP_RNDN);
    mpfr_pow_ui(pi, pi, n + 1, GMP_RNDN);
    mpfr_mul(z, z, pi, GMP_RNDN);

    mpfr_div(t, t, z, GMP_RNDN);

    /* round */
    mpfr_round(t, t);
    mpfr_get_z(r, t, GMP_RNDN);
    fmpz_set_mpz(res, r);

    if (n % 4 == 2)
        fmpz_neg(res, res);

    mpz_clear(r);
    mpfr_clear(t);
    mpfr_clear(z);
    mpfr_clear(pi);
}
/* Basecase remainder dispatcher: choose the 1-, 2- or 3-limb kernel
   according to how many bits an unreduced coefficient sum may need. */
void _nmod_poly_rem_basecase(mp_ptr R, mp_ptr W, mp_srcptr A, long lenA,
                             mp_srcptr B, long lenB, nmod_t mod)
{
    /* two (FLINT_BITS - norm)-bit factors plus log2 of the number of
       terms that can be accumulated */
    const long width = FLINT_BIT_COUNT(lenA - lenB + 1)
                       + 2 * (FLINT_BITS - mod.norm);

    if (width > 2 * FLINT_BITS)
        _nmod_poly_rem_basecase_3(R, W, A, lenA, B, lenB, mod);
    else if (width > FLINT_BITS)
        _nmod_poly_rem_basecase_2(R, W, A, lenA, B, lenB, mod);
    else
        _nmod_poly_rem_basecase_1(R, W, A, lenA, B, lenB, mod);
}
/* Compute B = A^exp by left-to-right binary exponentiation.
   Fix: the bit test `exp & (1L << i)` is undefined behavior when i is
   at least the width of long (e.g. LLP64 platforms where long is 32
   bits but ulong exp is 64 bits); test bits by shifting exp instead,
   matching the safe idiom used by arb_mat_pow_ui. */
void acb_mat_pow_ui(acb_mat_t B, const acb_mat_t A, ulong exp, long prec)
{
    long d = acb_mat_nrows(A);

    if (exp <= 2 || d <= 1)
    {
        /* trivial exponents and tiny matrices */
        if (exp == 0 || d == 0)
        {
            acb_mat_one(B);
        }
        else if (d == 1)
        {
            acb_pow_ui(acb_mat_entry(B, 0, 0),
                       acb_mat_entry(A, 0, 0), exp, prec);
        }
        else if (exp == 1)
        {
            acb_mat_set(B, A);
        }
        else if (exp == 2)
        {
            acb_mat_mul(B, A, A, prec);   /* todo: sqr */
        }
    }
    else
    {
        acb_mat_t T, U;
        long i;

        acb_mat_init(T, d, d);
        acb_mat_set(T, A);
        acb_mat_init(U, d, d);

        /* square down from the second-highest bit of exp */
        for (i = ((long) FLINT_BIT_COUNT(exp)) - 2; i >= 0; i--)
        {
            acb_mat_mul(U, T, T, prec);   /* todo: sqr */

            /* (exp >> i) & 1 instead of exp & (1L << i): avoids UB
               when i >= number of bits in long */
            if ((exp >> i) & 1)
                acb_mat_mul(T, U, A, prec);
            else
                acb_mat_swap(T, U);
        }

        acb_mat_swap(B, T);
        acb_mat_clear(T);
        acb_mat_clear(U);
    }
}
/* Compute B = A^exp by left-to-right binary exponentiation, with the
   trivial exponents and dimensions handled up front. */
void arb_mat_pow_ui(arb_mat_t B, const arb_mat_t A, ulong exp, slong prec)
{
    slong dim = arb_mat_nrows(A);
    arb_mat_t P, Q;
    slong bit;

    /* A^0 and the 0x0 matrix power are the identity */
    if (exp == 0 || dim == 0)
    {
        arb_mat_one(B);
        return;
    }

    /* 1x1 matrices reduce to a scalar power */
    if (dim == 1)
    {
        arb_pow_ui(arb_mat_entry(B, 0, 0), arb_mat_entry(A, 0, 0), exp, prec);
        return;
    }

    if (exp == 1)
    {
        arb_mat_set(B, A);
        return;
    }

    if (exp == 2)
    {
        arb_mat_sqr(B, A, prec);
        return;
    }

    /* general case: square down from the second-highest bit of exp */
    arb_mat_init(P, dim, dim);
    arb_mat_set(P, A);
    arb_mat_init(Q, dim, dim);

    for (bit = ((slong) FLINT_BIT_COUNT(exp)) - 2; bit >= 0; bit--)
    {
        arb_mat_sqr(Q, P, prec);

        if (exp & (WORD(1) << bit))
            arb_mat_mul(P, Q, A, prec);
        else
            arb_mat_swap(P, Q);
    }

    arb_mat_swap(B, P);
    arb_mat_clear(P);
    arb_mat_clear(Q);
}
/* Basecase quotient dispatcher: pick the 1-, 2- or 3-limb kernel from
   the maximum number of bits an unreduced coefficient sum may need. */
void _nmod_poly_div_basecase(mp_ptr Q, mp_ptr W, mp_srcptr A, long A_len,
                             mp_srcptr B, long B_len, nmod_t mod)
{
    /* two (FLINT_BITS - norm)-bit factors plus log2 of the term count */
    const long width = FLINT_BIT_COUNT(A_len - B_len + 1)
                       + 2 * (FLINT_BITS - mod.norm);

    if (width > 2 * FLINT_BITS)
        _nmod_poly_div_basecase_3(Q, W, A, A_len, B, B_len, mod);
    else if (width > FLINT_BITS)
        _nmod_poly_div_basecase_2(Q, W, A, A_len, B, B_len, mod);
    else
        _nmod_poly_div_basecase_1(Q, W, A, A_len, B, B_len, mod);
}
/* Truncated squaring via Kronecker substitution: pack the coefficients
   of poly into one large integer, square it with mpn_sqr, and unpack
   the low n coefficients of the result into res. */
void _fmpz_poly_sqrlow_KS(fmpz * res, const fmpz * poly, long len, long n)
{
    int neg;
    long bits, limbs, loglen, sign = 0;
    mp_limb_t *arr_in, *arr_out;

    FMPZ_VEC_NORM(poly, len);   /* strip leading zero coefficients */

    if (len == 0)
    {
        _fmpz_vec_zero(res, n);
        return;
    }

    /* packing convention depends on the sign of the leading coefficient */
    neg = (fmpz_sgn(poly + len - 1) > 0) ? 0 : -1;

    /* a square has at most 2*len - 1 coefficients; zero any excess */
    if (n > 2 * len - 1)
    {
        _fmpz_vec_zero(res + 2 * len - 1, n - (2 * len - 1));
        n = 2 * len - 1;
    }

    bits = _fmpz_vec_max_bits(poly, len);
    if (bits < 0)
    {
        /* negative max_bits signals that some coefficient is negative */
        sign = 1;
        bits = - bits;
    }

    /* each product coefficient needs 2*bits + log2(len) bits, plus a
       sign bit when coefficients can be negative */
    loglen = FLINT_BIT_COUNT(len);
    bits = 2 * bits + loglen + sign;
    limbs = (bits * len - 1) / FLINT_BITS + 1;

    arr_in = flint_calloc(limbs, sizeof(mp_limb_t));
    arr_out = flint_malloc((2 * limbs) * sizeof(mp_limb_t));

    _fmpz_poly_bit_pack(arr_in, poly, len, bits, neg);

    mpn_sqr(arr_out, arr_in, limbs);

    if (sign)
        _fmpz_poly_bit_unpack(res, n, arr_out, bits, 0);
    else
        _fmpz_poly_bit_unpack_unsigned(res, n, arr_out, bits);

    flint_free(arr_in);
    flint_free(arr_out);
}
/* Core gamma routine: computes gamma(x) (inverse = 0) or 1/gamma(x)
   (inverse = 1) using the Stirling series, using the reflection
   formula when acb_gamma_stirling_choose_param requests it. */
static void _acb_gamma(acb_t y, const acb_t x, long prec, int inverse)
{
    int reflect;
    long r, n, wp;
    acb_t t, u, v;

    /* working precision with guard bits */
    wp = prec + FLINT_BIT_COUNT(prec);

    acb_gamma_stirling_choose_param(&reflect, &r, &n, x, 1, 0, wp);

    acb_init(t);
    acb_init(u);
    acb_init(v);

    if (reflect)
    {
        /* gamma(x) = (rf(1-x, r) * pi) / (gamma(1-x+r) sin(pi x)) */
        acb_sub_ui(t, x, 1, wp);
        acb_neg(t, t);                        /* t = 1 - x */
        acb_rising_ui_rec(u, t, r, wp);       /* u = rf(1-x, r) */
        arb_const_pi(acb_realref(v), wp);
        acb_mul_arb(u, u, acb_realref(v), wp);   /* u = rf(1-x, r) * pi */
        acb_add_ui(t, t, r, wp);              /* t = 1 - x + r */
        acb_gamma_stirling_eval(v, t, n, 0, wp);
        acb_exp(v, v, wp);                    /* v = gamma(1-x+r) */
        acb_sin_pi(t, x, wp);
        acb_mul(v, v, t, wp);                 /* v = gamma(1-x+r) sin(pi x) */
    }
    else
    {
        /* gamma(x) = gamma(x+r) / rf(x,r) */
        acb_add_ui(t, x, r, wp);
        acb_gamma_stirling_eval(u, t, n, 0, wp);
        /* NOTE(review): the exponential here is taken at prec, not wp,
           unlike the reflection branch -- confirm this is intentional */
        acb_exp(u, u, prec);
        acb_rising_ui_rec(v, x, r, wp);
    }

    /* final division chooses gamma or its reciprocal */
    if (inverse)
        acb_div(y, v, u, prec);
    else
        acb_div(y, u, v, prec);

    acb_clear(t);
    acb_clear(u);
    acb_clear(v);
}
/* Compute B = A^exp by left-to-right binary exponentiation.
   Fix: the bit test `exp & (1L << i)` is undefined behavior when i is
   at least the width of long (possible where ulong is wider than long,
   e.g. LLP64); test bits by shifting exp itself instead. */
void fmpz_poly_mat_pow(fmpz_poly_mat_t B, const fmpz_poly_mat_t A, ulong exp)
{
    long d = fmpz_poly_mat_nrows(A);

    if (exp == 0 || d == 0)
    {
        fmpz_poly_mat_one(B);
    }
    else if (exp == 1)
    {
        fmpz_poly_mat_set(B, A);
    }
    else if (exp == 2)
    {
        fmpz_poly_mat_sqr(B, A);
    }
    else if (d == 1)
    {
        /* 1x1 matrix: reduce to a scalar polynomial power */
        fmpz_poly_pow(fmpz_poly_mat_entry(B, 0, 0),
                      fmpz_poly_mat_entry(A, 0, 0), exp);
    }
    else
    {
        fmpz_poly_mat_t T, U;
        long i;

        fmpz_poly_mat_init_set(T, A);
        fmpz_poly_mat_init(U, d, d);

        /* square down from the second-highest bit of exp */
        for (i = ((long) FLINT_BIT_COUNT(exp)) - 2; i >= 0; i--)
        {
            fmpz_poly_mat_sqr(U, T);

            /* (exp >> i) & 1 instead of exp & (1L << i): avoids UB
               when i >= number of bits in long */
            if ((exp >> i) & 1)
                fmpz_poly_mat_mul(T, U, A);
            else
                fmpz_poly_mat_swap(T, U);
        }

        fmpz_poly_mat_swap(B, T);
        fmpz_poly_mat_clear(T);
        fmpz_poly_mat_clear(U);
    }
}
/* Rising factorial y = x (x+1) ... (x+n-1): the largest multiple of
   eight factors is computed by binary splitting, and any remaining
   factors are multiplied in one at a time. */
void gamma_rising_fmprb_ui_bsplit_eight(fmprb_t y, const fmprb_t x, ulong n, long prec)
{
    if (n == 0)
    {
        fmprb_one(y);   /* empty product */
    }
    else if (n == 1)
    {
        fmprb_set_round(y, x, prec);
    }
    else
    {
        ulong k, a;
        long wp;
        fmprb_t t, u;

        /* guard bits proportional to log2(n) */
        wp = FMPR_PREC_ADD(prec, FLINT_BIT_COUNT(n));

        fmprb_init(t);
        fmprb_init(u);

        if (n >= 8)
        {
            /* binary splitting over the first (n/8)*8 factors */
            bsplit(t, x, 0, (n / 8) * 8, wp);
            a = (n / 8) * 8;
        }
        else
        {
            /* too short for splitting: start from the first factor */
            fmprb_set(t, x);
            a = 1;
        }

        /* multiply in the leftover factors x+a, ..., x+n-1 */
        for (k = a; k < n; k++)
        {
            fmprb_add_ui(u, x, k, wp);
            fmprb_mul(t, t, u, wp);
        }

        fmprb_set_round(y, t, prec);

        fmprb_clear(t);
        fmprb_clear(u);
    }
}
/* High-product dispatcher: very short inputs go to the classical
   routine; otherwise choose between classical and Kronecker
   substitution based on coefficient size and length. */
void _nmod_poly_mulhigh(mp_ptr res, mp_srcptr poly1, long len1,
                        mp_srcptr poly2, long len2, long n, nmod_t mod)
{
    long cbits, lbits;

    /* tiny products: classical wins outright */
    if (len1 + len2 <= 6)
    {
        _nmod_poly_mulhigh_classical(res, poly1, len1, poly2, len2, n, mod);
        return;
    }

    cbits = FLINT_BITS - (long) mod.norm;   /* bits per coefficient */
    lbits = FLINT_BIT_COUNT(len1);          /* log2 of the length */

    /* single-limb accumulators and a short product: still classical;
       everything else uses Kronecker substitution */
    if (2 * cbits + lbits <= FLINT_BITS && len1 + len2 < 16)
        _nmod_poly_mulhigh_classical(res, poly1, len1, poly2, len2, n, mod);
    else
        _nmod_poly_mul_KS(res, poly1, len1, poly2, len2, 0, mod);
}
/* Digamma function psi(x) for complex x: Stirling series with upward
   argument reduction, using a reflection step when requested by
   acb_gamma_stirling_choose_param. */
void acb_digamma(acb_t y, const acb_t x, long prec)
{
    int reflect;
    long r, n, wp;
    acb_t t, u, v;

    /* working precision with guard bits */
    wp = prec + FLINT_BIT_COUNT(prec);

    /* digamma variant selected by the last two flags (1, 1) */
    acb_gamma_stirling_choose_param(&reflect, &r, &n, x, 1, 1, wp);

    acb_init(t);
    acb_init(u);
    acb_init(v);

    /* psi(x) = psi((1-x)+r) - h(1-x,r) - pi*cot(pi*x) */
    if (reflect)
    {
        acb_sub_ui(t, x, 1, wp);
        acb_neg(t, t);                           /* t = 1 - x */
        acb_cot_pi(v, x, wp);
        arb_const_pi(acb_realref(u), wp);
        acb_mul_arb(v, v, acb_realref(u), wp);   /* v = pi*cot(pi*x) */
        /* NOTE(review): acb_rising2_ui presumably computes the rising
           factorial y and its derivative u, making u/y the correction
           term h -- confirm against its documentation */
        acb_rising2_ui(y, u, t, r, wp);
        acb_div(u, u, y, wp);
        acb_add(v, v, u, wp);
        acb_add_ui(t, t, r, wp);
        acb_gamma_stirling_eval(u, t, n, 1, wp);
        acb_sub(y, u, v, wp);
    }
    else
    {
        /* psi(x) = psi(x+r) - h(x,r) */
        acb_add_ui(t, x, r, wp);
        acb_gamma_stirling_eval(u, t, n, 1, wp);
        acb_rising2_ui(y, t, x, r, wp);
        acb_div(t, t, y, wp);
        acb_sub(y, u, t, prec);
    }

    acb_clear(t);
    acb_clear(u);
    acb_clear(v);
}
/* Matrix product C = A * B, dispatching between the classical and
   multimodular algorithms based on dimensions and entry sizes.
   Aliased output is handled through a temporary. */
void fmpz_mat_mul(fmpz_mat_t C, const fmpz_mat_t A, const fmpz_mat_t B)
{
    long inner, rows, mid, cols, abits, bbits;

    rows = A->r;
    mid = A->c;
    cols = B->c;

    /* C overlaps an input: compute into a temporary and swap */
    if (C == A || C == B)
    {
        fmpz_mat_t tmp;
        fmpz_mat_init(tmp, rows, cols);
        fmpz_mat_mul(tmp, A, B);
        fmpz_mat_swap(C, tmp);
        fmpz_mat_clear(tmp);
        return;
    }

    inner = FLINT_MIN(FLINT_MIN(rows, mid), cols);

    /* small dimensions: classical multiplication */
    if (inner < 10)
    {
        fmpz_mat_mul_classical(C, A, B);
        return;
    }

    /* assign before FLINT_ABS: the macro evaluates its argument twice */
    abits = fmpz_mat_max_bits(A);
    bbits = fmpz_mat_max_bits(B);
    abits = FLINT_ABS(abits);
    bbits = FLINT_ABS(bbits);

    /* large entries relative to the dimension favor classical;
       otherwise use the multimodular algorithm with a bit bound on
       the result entries */
    if (5*(abits + bbits) > inner * inner)
        fmpz_mat_mul_classical(C, A, B);
    else
        _fmpz_mat_mul_multi_mod(C, A, B,
            abits + bbits + FLINT_BIT_COUNT(mid) + 1);
}
/* Kronecker substitution multiplication over Z/nZ: pack both inputs
   into large integers, multiply with mpn routines, and unpack with
   reduction mod n.  If bits == 0 a sufficient packing width is derived
   from the coefficient sizes.  in1 == in2 is treated as squaring and
   shares a single packed buffer. */
void _nmod_poly_mul_KS(mp_ptr out, mp_srcptr in1, long len1,
                  mp_srcptr in2, long len2, mp_bitcnt_t bits, nmod_t mod)
{
    long len_out = len1 + len2 - 1, limbs1, limbs2;
    mp_ptr mpn1, mpn2, res;

    if (bits == 0)
    {
        mp_bitcnt_t bits1, bits2, loglen;

        bits1  = _nmod_vec_max_bits(in1, len1);
        bits2  = (in1 == in2) ? bits1 : _nmod_vec_max_bits(in2, len2);
        loglen = FLINT_BIT_COUNT(len2);

        /* each output coefficient is a sum of at most len2 products */
        bits = bits1 + bits2 + loglen;
    }

    limbs1 = (len1 * bits - 1) / FLINT_BITS + 1;
    limbs2 = (len2 * bits - 1) / FLINT_BITS + 1;

    mpn1 = (mp_ptr) malloc(sizeof(mp_limb_t) * limbs1);
    mpn2 = (in1 == in2) ? mpn1 : (mp_ptr) malloc(sizeof(mp_limb_t) * limbs2);

    _nmod_poly_bit_pack(mpn1, in1, len1, bits);
    if (in1 != in2)
        _nmod_poly_bit_pack(mpn2, in2, len2, bits);

    res = (mp_ptr) malloc(sizeof(mp_limb_t) * (limbs1 + limbs2));

    if (in1 != in2)
        mpn_mul(res, mpn1, limbs1, mpn2, limbs2);
    else
        /* NOTE(review): mpn_mul_n assumes equal operand sizes, i.e.
           len1 == len2 whenever in1 == in2 -- confirm callers */
        mpn_mul_n(res, mpn1, mpn1, limbs1);

    _nmod_poly_bit_unpack(out, len_out, res, bits, mod);

    /* when squaring, mpn2 aliases mpn1 and is freed exactly once */
    free(mpn2);
    if (in1 != in2)
        free(mpn1);
    free(res);
}
/* Precompute data for radix conversion of degree-degF polynomials with
   respect to R: repeated-square powers of R and corresponding Newton
   inverses, for k = ceil(log2(N+1)) levels where N = degF / degR. */
void fmpz_mod_poly_radix_init(fmpz_mod_poly_radix_t D,
                              const fmpz_mod_poly_t R, long degF)
{
    const long degR = R->length - 1;

    if (degF < degR)
    {
        D->k = 0;   /* input already fits in a single digit */
    }
    else
    {
        const long N = degF / degR;
        const long k = FLINT_BIT_COUNT(N);  /* k := ceil{log{N+1}} */

        /* one contiguous coefficient vector backs both the power table
           (V) and the inverse table (W) */
        const long lenV = degR * ((1L << k) - 1) + k;
        const long lenW = degR * ((1L << k) - 1);

        long i;

        D->V = _fmpz_vec_init(lenV + lenW);
        D->W = D->V + lenV;

        D->Rpow = flint_malloc(k * sizeof(fmpz *));
        D->Rinv = flint_malloc(k * sizeof(fmpz *));

        /* level i occupies degR*(2^i - 1) coefficients in from the
           start; the extra +i in Rpow accounts for one additional
           coefficient per level */
        for (i = 0; i < k; i++)
        {
            D->Rpow[i] = D->V + (degR * ((1L << i) - 1) + i);
            D->Rinv[i] = D->W + (degR * ((1L << i) - 1));
        }

        /* inverse of the leading coefficient of R modulo p */
        fmpz_init(&(D->invL));
        fmpz_invmod(&(D->invL), R->coeffs + degR, &(R->p));

        _fmpz_mod_poly_radix_init(D->Rpow, D->Rinv, R->coeffs, degR + 1,
                                  k, &(D->invL), &(R->p));

        D->k    = k;
        D->degR = degR;
    }
}
/* Binary ladder over the bits of m (most significant bit first) used
   by n_is_probabprime_fibonacci.  Maintains a pair (x, y) starting at
   (2, n-3); each step squares-and-subtracts-2 on one component
   (depending on the current bit of m) and sets the other to
   x*y + 3 mod n.  All arithmetic is mod n with precomputed inverse
   ninv for full-width moduli. */
n_pair_t fchain2_preinv(mp_limb_t m, mp_limb_t n, mp_limb_t ninv)
{
    n_pair_t current = {0, 0}, old;
    int length;
    mp_limb_t power, xy;

    old.x = 2UL;
    old.y = n - 3UL;

    length = FLINT_BIT_COUNT(m);
    power = (1UL << (length - 1));   /* mask selecting the top bit of m */

    for (; length > 0; length--)
    {
        /* common cross term x*y + 3 mod n */
        xy = n_mulmod2_preinv(old.x, old.y, n, ninv);
        xy = n_addmod(xy, 3UL, n);

        if (m & power)
        {
            /* bit set: advance via y^2 - 2 */
            current.y = n_submod(n_mulmod2_preinv(old.y, old.y, n, ninv),
                                 2UL, n);
            current.x = xy;
        }
        else
        {
            /* bit clear: advance via x^2 - 2 */
            current.x = n_submod(n_mulmod2_preinv(old.x, old.x, n, ninv),
                                 2UL, n);
            current.y = xy;
        }

        power >>= 1;
        old = current;
    }

    return current;
}
/* Compute the harmonic number H_n = num/den in canonical form, using
   the balanced odd-index summation mpn_harmonic_odd_balanced and
   writing the limb results directly into preallocated mpz buffers. */
static void _mpq_harmonic_odd_balanced(fmpz_t num, fmpz_t den, long n)
{
    mpz_t p, q;
    mp_ptr t, v;
    mp_size_t ts, vs;
    long size;

    /* H_n = 0/1 for n <= 0 */
    if (n <= 0)
    {
        fmpz_zero(num);
        fmpz_one(den);
        return;
    }

    /* TODO: we could avoid the copying/allocation overhead when there
       is guaranteed to be sufficient space in res already */

    /* generous bit bound for the limb data produced by the mpn routine */
    size = FLINT_BIT_COUNT(n) * (n+2) + 2*FLINT_BITS;

    mpz_init2(p, size);
    mpz_init2(q, size);

    /* write numerator/denominator limbs straight into the mpz buffers */
    t = p->_mp_d;
    v = q->_mp_d;

    mpn_harmonic_odd_balanced(t, &ts, v, &vs, 1, n+1, n, 1);

    p->_mp_size = ts;
    q->_mp_size = vs;

    fmpz_set_mpz(num, p);
    fmpz_set_mpz(den, q);

    mpz_clear(p);
    mpz_clear(q);

    _fmpq_canonicalise(num, den);
}
/* Set z to a lower bound for x^e, rounding down at every step.
   Fix: the bit test `e & (1UL << i)` is undefined behavior when i is
   at least the width of unsigned long (possible where ulong/mp_limb_t
   is wider than unsigned long, e.g. LLP64); test bits by shifting e
   itself instead. */
void mag_pow_ui_lower(mag_t z, const mag_t x, ulong e)
{
    if (e <= 2)
    {
        /* trivial exponents handled directly */
        if (e == 0)
            mag_one(z);
        else if (e == 1)
            mag_set(z, x);
        else
            mag_mul_lower(z, x, x);
    }
    else if (mag_is_inf(x))
    {
        mag_inf(z);
    }
    else
    {
        mag_t y;
        int i, bits;

        mag_init_set(y, x);
        bits = FLINT_BIT_COUNT(e);

        /* left-to-right binary exponentiation, rounding down throughout */
        for (i = bits - 2; i >= 0; i--)
        {
            mag_mul_lower(y, y, y);

            /* (e >> i) & 1 instead of e & (1UL << i): avoids UB when
               i >= number of bits in unsigned long */
            if ((e >> i) & 1)
                mag_mul_lower(y, y, x);
        }

        mag_swap(z, y);
        mag_clear(y);
    }
}
/* Compute the rising factorial u = rf(x, n) together with v (the
   second output of the binary-splitting kernel) for n >= 2; the
   cases n = 0 and n = 1 are returned directly. */
void acb_rising2_ui_bs(acb_t u, acb_t v, const acb_t x, ulong n, slong prec)
{
    slong wp;
    acb_t xcopy;

    if (n == 0)
    {
        acb_zero(v);
        acb_one(u);
        return;
    }

    if (n == 1)
    {
        acb_set(u, x);
        acb_one(v);
        return;
    }

    /* guard bits proportional to log2(n) */
    wp = ARF_PREC_ADD(prec, FLINT_BIT_COUNT(n));

    /* copy x so aliasing with u or v is harmless */
    acb_init(xcopy);
    acb_set(xcopy, x);
    bsplit(v, u, xcopy, 0, n, wp);
    acb_clear(xcopy);
}
/* Logarithm of the gamma function for real balls:
   log(gamma(x)) = log(gamma(x+r)) - log(rf(x,r)),
   with the Stirling series evaluated at the shifted argument x + r. */
void fmprb_lgamma(fmprb_t y, const fmprb_t x, long prec)
{
    int reflect;
    long r, n, wp;
    fmprb_t t, u;

    /* working precision with guard bits */
    wp = prec + FLINT_BIT_COUNT(prec);

    /* reflection disabled for the logarithmic variant */
    gamma_stirling_choose_param_fmprb(&reflect, &r, &n, x, 0, 0, wp);

    /* log(gamma(x)) = log(gamma(x+r)) - log(rf(x,r)) */
    fmprb_init(t);
    fmprb_init(u);

    fmprb_add_ui(t, x, r, wp);
    gamma_stirling_eval_fmprb(u, t, n, 0, wp);

    gamma_rising_fmprb_ui_bsplit(t, x, r, wp);
    fmprb_log(t, t, wp);

    fmprb_sub(y, u, t, prec);

    fmprb_clear(t);
    fmprb_clear(u);
}
/* Command-line driver: evaluates a collection of demo integrals with
   acb_calc_integrate.  Run without a valid -i argument to print usage
   and the list of implemented integrals. */
int main(int argc, char *argv[])
{
    acb_t s, t, a, b;
    mag_t tol;
    slong prec, goal;
    slong N;
    ulong k;
    int integral, ifrom, ito;
    int i, twice, havegoal, havetol;
    acb_calc_integrate_opt_t options;

    ifrom = ito = -1;

    /* first pass over argv: which integral(s) to compute */
    for (i = 1; i < argc; i++)
    {
        if (!strcmp(argv[i], "-i"))
        {
            if (!strcmp(argv[i+1], "all"))
            {
                ifrom = 0;
                ito = NUM_INTEGRALS - 1;
            }
            else
            {
                ifrom = ito = atol(argv[i+1]);
                if (ito < 0 || ito >= NUM_INTEGRALS)
                    flint_abort();
            }
        }
    }

    /* no -i given: print usage and exit */
    if (ifrom == -1)
    {
        flint_printf("Compute integrals using acb_calc_integrate.\n");
        flint_printf("Usage: integrals -i n [-prec p] [-tol eps] [-twice] [...]\n\n");
        flint_printf("-i n - compute integral n (0 <= n <= %d), or \"-i all\"\n", NUM_INTEGRALS - 1);
        flint_printf("-prec p - precision in bits (default p = 64)\n");
        flint_printf("-goal p - approximate relative accuracy goal (default p)\n");
        flint_printf("-tol eps - approximate absolute error goal (default 2^-p)\n");
        flint_printf("-twice - run twice (to see overhead of computing nodes)\n");
        flint_printf("-heap - use heap for subinterval queue\n");
        flint_printf("-verbose - show information\n");
        flint_printf("-verbose2 - show more information\n");
        flint_printf("-deg n - use quadrature degree up to n\n");
        flint_printf("-eval n - limit number of function evaluations to n\n");
        flint_printf("-depth n - limit subinterval queue size to n\n\n");
        flint_printf("Implemented integrals:\n");
        for (integral = 0; integral < NUM_INTEGRALS; integral++)
            flint_printf("I%d = %s\n", integral, descr[integral]);
        flint_printf("\n");
        return 1;
    }

    acb_calc_integrate_opt_init(options);

    prec = 64;
    twice = 0;
    goal = 0;
    havetol = havegoal = 0;

    acb_init(a);
    acb_init(b);
    acb_init(s);
    acb_init(t);
    mag_init(tol);

    /* second pass: options */
    for (i = 1; i < argc; i++)
    {
        if (!strcmp(argv[i], "-prec"))
        {
            prec = atol(argv[i+1]);
        }
        else if (!strcmp(argv[i], "-twice"))
        {
            twice = 1;
        }
        else if (!strcmp(argv[i], "-goal"))
        {
            goal = atol(argv[i+1]);
            if (goal < 0)
            {
                flint_printf("expected goal >= 0\n");
                return 1;
            }
            havegoal = 1;
        }
        else if (!strcmp(argv[i], "-tol"))
        {
            /* parse a decimal tolerance into a mag_t via arb */
            arb_t x;
            arb_init(x);
            arb_set_str(x, argv[i+1], 10);
            arb_get_mag(tol, x);
            arb_clear(x);
            havetol = 1;
        }
        else if (!strcmp(argv[i], "-deg"))
        {
            options->deg_limit = atol(argv[i+1]);
        }
        else if (!strcmp(argv[i], "-eval"))
        {
            options->eval_limit = atol(argv[i+1]);
        }
        else if (!strcmp(argv[i], "-depth"))
        {
            options->depth_limit = atol(argv[i+1]);
        }
        else if (!strcmp(argv[i], "-verbose"))
        {
            options->verbose = 1;
        }
        else if (!strcmp(argv[i], "-verbose2"))
        {
            options->verbose = 2;
        }
        else if (!strcmp(argv[i], "-heap"))
        {
            options->use_heap = 1;
        }
    }

    /* defaults: goal = prec, tol = 2^-prec */
    if (!havegoal)
        goal = prec;

    if (!havetol)
        mag_set_ui_2exp_si(tol, 1, -prec);

    for (integral = ifrom; integral <= ito; integral++)
    {
        flint_printf("I%d = %s ...\n", integral, descr[integral]);

        /* optionally run twice to expose node-computation overhead */
        for (i = 0; i < 1 + twice; i++)
        {
            TIMEIT_ONCE_START
            switch (integral)
            {
            case 0:
                acb_set_d(a, 0);
                acb_set_d(b, 100);
                acb_calc_integrate(s, f_sin, NULL, a, b, goal, tol, options, prec);
                break;

            case 1:
                acb_set_d(a, 0);
                acb_set_d(b, 1);
                acb_calc_integrate(s, f_atanderiv, NULL, a, b, goal, tol, options, prec);
                acb_mul_2exp_si(s, s, 2);
                break;

            case 2:
                /* truncated infinite interval plus an explicit tail bound */
                acb_set_d(a, 0);
                acb_one(b);
                acb_mul_2exp_si(b, b, goal);
                acb_calc_integrate(s, f_atanderiv, NULL, a, b, goal, tol, options, prec);
                arb_add_error_2exp_si(acb_realref(s), -goal);
                acb_mul_2exp_si(s, s, 1);
                break;

            case 3:
                acb_set_d(a, 0);
                acb_set_d(b, 1);
                acb_calc_integrate(s, f_circle, NULL, a, b, goal, tol, options, prec);
                acb_mul_2exp_si(s, s, 2);
                break;

            case 4:
                acb_set_d(a, 0);
                acb_set_d(b, 8);
                acb_calc_integrate(s, f_rump, NULL, a, b, goal, tol, options, prec);
                break;

            case 5:
                acb_set_d(a, 1);
                acb_set_d(b, 101);
                acb_calc_integrate(s, f_floor, NULL, a, b, goal, tol, options, prec);
                break;

            case 6:
                acb_set_d(a, 0);
                acb_set_d(b, 1);
                acb_calc_integrate(s, f_helfgott, NULL, a, b, goal, tol, options, prec);
                break;

            case 7:
                /* contour integral of f_zeta around the rectangle
                   (-1,-1), (2,-1), (2,1), (-1,1) */
                acb_zero(s);

                acb_set_d_d(a, -1.0, -1.0);
                acb_set_d_d(b, 2.0, -1.0);
                acb_calc_integrate(t, f_zeta, NULL, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_set_d_d(a, 2.0, -1.0);
                acb_set_d_d(b, 2.0, 1.0);
                acb_calc_integrate(t, f_zeta, NULL, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_set_d_d(a, 2.0, 1.0);
                acb_set_d_d(b, -1.0, 1.0);
                acb_calc_integrate(t, f_zeta, NULL, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_set_d_d(a, -1.0, 1.0);
                acb_set_d_d(b, -1.0, -1.0);
                acb_calc_integrate(t, f_zeta, NULL, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_const_pi(t, prec);
                acb_div(s, s, t, prec);
                acb_mul_2exp_si(s, s, -1);
                acb_div_onei(s, s);
                break;

            case 8:
                acb_set_d(a, 0);
                acb_set_d(b, 1);
                acb_calc_integrate(s, f_essing, NULL, a, b, goal, tol, options, prec);
                break;

            case 9:
                acb_set_d(a, 0);
                acb_set_d(b, 1);
                acb_calc_integrate(s, f_essing2, NULL, a, b, goal, tol, options, prec);
                break;

            case 10:
                acb_set_d(a, 0);
                acb_set_d(b, 10000);
                acb_calc_integrate(s, f_factorial1000, NULL, a, b, goal, tol, options, prec);
                break;

            case 11:
                acb_set_d_d(a, 1.0, 0.0);
                acb_set_d_d(b, 1.0, 1000.0);
                acb_calc_integrate(s, f_gamma, NULL, a, b, goal, tol, options, prec);
                break;

            case 12:
                acb_set_d(a, -10.0);
                acb_set_d(b, 10.0);
                acb_calc_integrate(s, f_sin_plus_small, NULL, a, b, goal, tol, options, prec);
                break;

            case 13:
                acb_set_d(a, -1020.0);
                acb_set_d(b, -1010.0);
                acb_calc_integrate(s, f_exp, NULL, a, b, goal, tol, options, prec);
                break;

            case 14:
                /* Gaussian on a truncated interval; the discarded tail
                   is bounded by exp(-b^2) */
                acb_set_d(a, 0);
                acb_set_d(b, ceil(sqrt(goal * 0.693147181) + 1.0));
                acb_calc_integrate(s, f_gaussian, NULL, a, b, goal, tol, options, prec);
                acb_mul(b, b, b, prec);
                acb_neg(b, b);
                acb_exp(b, b, prec);
                arb_add_error(acb_realref(s), acb_realref(b));
                break;

            case 15:
                acb_set_d(a, 0.0);
                acb_set_d(b, 1.0);
                acb_calc_integrate(s, f_spike, NULL, a, b, goal, tol, options, prec);
                break;

            case 16:
                acb_set_d(a, 0.0);
                acb_set_d(b, 8.0);
                acb_calc_integrate(s, f_monster, NULL, a, b, goal, tol, options, prec);
                break;

            case 17:
                /* sech on a truncated interval; tail bounded by 2 exp(-b) */
                acb_set_d(a, 0);
                acb_set_d(b, ceil(goal * 0.693147181 + 1.0));
                acb_calc_integrate(s, f_sech, NULL, a, b, goal, tol, options, prec);
                acb_neg(b, b);
                acb_exp(b, b, prec);
                acb_mul_2exp_si(b, b, 1);
                arb_add_error(acb_realref(s), acb_realref(b));
                break;

            case 18:
                /* sech^3 truncated; tail bounded by (8/3) exp(-3b) */
                acb_set_d(a, 0);
                acb_set_d(b, ceil(goal * 0.693147181 / 3.0 + 2.0));
                acb_calc_integrate(s, f_sech3, NULL, a, b, goal, tol, options, prec);
                acb_neg(b, b);
                acb_mul_ui(b, b, 3, prec);
                acb_exp(b, b, prec);
                acb_mul_2exp_si(b, b, 3);
                acb_div_ui(b, b, 3, prec);
                arb_add_error(acb_realref(s), acb_realref(b));
                break;

            case 19:
                if (goal < 0)
                    abort();
                /* error bound 2^-N (1+N) when truncated at 2^-N */
                N = goal + FLINT_BIT_COUNT(goal);
                acb_one(a);
                acb_mul_2exp_si(a, a, -N);
                acb_one(b);
                acb_calc_integrate(s, f_log_div1p, NULL, a, b, goal, tol, options, prec);
                acb_set_ui(b, N + 1);
                acb_mul_2exp_si(b, b, -N);
                arb_add_error(acb_realref(s), acb_realref(b));
                break;

            case 20:
                if (goal < 0)
                    abort();
                /* error bound (N+1) exp(-N) when truncated at N */
                N = goal + FLINT_BIT_COUNT(goal);
                acb_zero(a);
                acb_set_ui(b, N);
                acb_calc_integrate(s, f_log_div1p_transformed, NULL, a, b, goal, tol, options, prec);
                acb_neg(b, b);
                acb_exp(b, b, prec);
                acb_mul_ui(b, b, N + 1, prec);
                arb_add_error(acb_realref(s), acb_realref(b));
                break;

            case 21:
                /* contour integral around the unit square centered at
                   the origin, n = 10 Laurent coefficient */
                acb_zero(s);
                N = 10;

                acb_set_d_d(a, 0.5, -0.5);
                acb_set_d_d(b, 0.5, 0.5);
                acb_calc_integrate(t, f_elliptic_p_laurent_n, &N, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_set_d_d(a, 0.5, 0.5);
                acb_set_d_d(b, -0.5, 0.5);
                acb_calc_integrate(t, f_elliptic_p_laurent_n, &N, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_set_d_d(a, -0.5, 0.5);
                acb_set_d_d(b, -0.5, -0.5);
                acb_calc_integrate(t, f_elliptic_p_laurent_n, &N, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_set_d_d(a, -0.5, -0.5);
                acb_set_d_d(b, 0.5, -0.5);
                acb_calc_integrate(t, f_elliptic_p_laurent_n, &N, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_const_pi(t, prec);
                acb_div(s, s, t, prec);
                acb_mul_2exp_si(s, s, -1);
                acb_div_onei(s, s);
                break;

            case 22:
                /* two-segment contour combined with the Hardy theta
                   function */
                acb_zero(s);
                N = 1000;

                acb_set_d_d(a, 100.0, 0.0);
                acb_set_d_d(b, 100.0, N);
                acb_calc_integrate(t, f_zeta_frac, NULL, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_set_d_d(a, 100, N);
                acb_set_d_d(b, 0.5, N);
                acb_calc_integrate(t, f_zeta_frac, NULL, a, b, goal, tol, options, prec);
                acb_add(s, s, t, prec);

                acb_div_onei(s, s);
                arb_zero(acb_imagref(s));
                acb_set_ui(t, N);
                acb_dirichlet_hardy_theta(t, t, NULL, NULL, 1, prec);
                acb_add(s, s, t, prec);
                acb_const_pi(t, prec);
                acb_div(s, s, t, prec);
                acb_add_ui(s, s, 1, prec);
                break;

            case 23:
                acb_set_d(a, 0.0);
                acb_set_d(b, 1000.0);
                acb_calc_integrate(s, f_lambertw, NULL, a, b, goal, tol, options, prec);
                break;

            case 24:
                acb_set_d(a, 0.0);
                acb_const_pi(b, prec);
                acb_calc_integrate(s, f_max_sin_cos, NULL, a, b, goal, tol, options, prec);
                break;

            case 25:
                acb_set_si(a, -1);
                acb_set_si(b, 1);
                acb_calc_integrate(s, f_erf_bent, NULL, a, b, goal, tol, options, prec);
                break;

            case 26:
                acb_set_si(a, -10);
                acb_set_si(b, 10);
                acb_calc_integrate(s, f_airy_ai, NULL, a, b, goal, tol, options, prec);
                break;

            case 27:
                acb_set_si(a, 0);
                acb_set_si(b, 10);
                acb_calc_integrate(s, f_horror, NULL, a, b, goal, tol, options, prec);
                break;

            case 28:
                acb_set_d_d(a, -1, -1);
                acb_set_d_d(b, -1, 1);
                acb_calc_integrate(s, f_sqrt, NULL, a, b, goal, tol, options, prec);
                break;

            case 29:
                /* truncated Gaussian with oscillation: tail bound
                   applied to both real and imaginary parts */
                acb_set_d(a, 0);
                acb_set_d(b, ceil(sqrt(goal * 0.693147181) + 1.0));
                acb_calc_integrate(s, f_gaussian_twist, NULL, a, b, goal, tol, options, prec);
                acb_mul(b, b, b, prec);
                acb_neg(b, b);
                acb_exp(b, b, prec);
                arb_add_error(acb_realref(s), acb_realref(b));
                arb_add_error(acb_imagref(s), acb_realref(b));
                break;

            case 30:
                acb_set_d(a, 0);
                acb_set_d(b, ceil(goal * 0.693147181 + 1.0));
                acb_calc_integrate(s, f_exp_airy, NULL, a, b, goal, tol, options, prec);
                acb_neg(b, b);
                acb_exp(b, b, prec);
                acb_mul_2exp_si(b, b, 1);
                arb_add_error(acb_realref(s), acb_realref(b));
                break;

            case 31:
                acb_zero(a);
                acb_const_pi(b, prec);
                acb_calc_integrate(s, f_sin_cos_frac, NULL, a, b, goal, tol, options, prec);
                break;

            case 32:
                acb_zero(a);
                acb_set_ui(b, 3);
                acb_calc_integrate(s, f_sin_near_essing, NULL, a, b, goal, tol, options, prec);
                break;

            case 33:
                /* scaled Bessel integral, k = 3, with explicit tail bound */
                acb_zero(a);
                acb_zero(b);
                k = 3;
                scaled_bessel_select_N(acb_realref(b), k, prec);
                acb_calc_integrate(s, f_scaled_bessel, &k, a, b, goal, tol, options, prec);
                scaled_bessel_tail_bound(acb_realref(a), k, acb_realref(b), prec);
                arb_add_error(acb_realref(s), acb_realref(a));
                break;

            case 34:
                /* same as case 33 but with k = 15 */
                acb_zero(a);
                acb_zero(b);
                k = 15;
                scaled_bessel_select_N(acb_realref(b), k, prec);
                acb_calc_integrate(s, f_scaled_bessel, &k, a, b, goal, tol, options, prec);
                scaled_bessel_tail_bound(acb_realref(a), k, acb_realref(b), prec);
                arb_add_error(acb_realref(s), acb_realref(a));
                break;

            case 35:
                acb_set_d_d(a, -1, -1);
                acb_set_d_d(b, -1, 1);
                acb_calc_integrate(s, f_rsqrt, NULL, a, b, goal, tol, options, prec);
                break;

            default:
                abort();
            }
            TIMEIT_ONCE_STOP
        }

        flint_printf("I%d = ", integral);
        acb_printn(s, 3.333 * prec, 0);
        flint_printf("\n\n");
    }

    acb_clear(a);
    acb_clear(b);
    acb_clear(s);
    acb_clear(t);
    mag_clear(tol);

    flint_cleanup();
    return 0;
}