/* compose by poly2 = a*x^n + c, no aliasing; n >= 1 */
void
_arb_poly_compose_axnc(arb_ptr res, arb_srcptr poly1, slong len1,
    const arb_t c, const arb_t a, slong n, slong prec)
{
    slong i;

    _arb_vec_set_round(res, poly1, len1, prec);

    /* shift by c (c = 0 case will be fast) */
    _arb_poly_taylor_shift(res, c, len1, prec);

    /* multiply by powers of a */
    if (!arb_is_one(a))
    {
        if (arb_equal_si(a, -1))
        {
            for (i = 1; i < len1; i += 2)
                arb_neg(res + i, res + i);
        }
        else if (len1 == 2)
        {
            arb_mul(res + 1, res + 1, a, prec);
        }
        else
        {
            arb_t t;
            arb_init(t);
            arb_set(t, a);

            for (i = 1; i < len1; i++)
            {
                arb_mul(res + i, res + i, t, prec);
                if (i + 1 < len1)
                    arb_mul(t, t, a, prec);
            }

            arb_clear(t);
        }
    }

    /* stretch */
    for (i = len1 - 1; i >= 1 && n > 1; i--)
    {
        arb_swap(res + i * n, res + i);
        _arb_vec_zero(res + (i - 1) * n + 1, n - 1);
    }
}
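/* Illustrative usage sketch, not part of the library source. It exercises
   composition with an inner polynomial of the form a*x^n + c through the
   public arb_poly_compose entry point, which (as far as we can tell from
   the sources) reduces to the helper above for such inputs. The driver code
   and the test polynomials are our own. */
#include "arb_poly.h"

int main(void)
{
    arb_poly_t p, q, r;

    arb_poly_init(p);
    arb_poly_init(q);
    arb_poly_init(r);

    arb_poly_set_coeff_si(p, 0, 1);
    arb_poly_set_coeff_si(p, 1, 1);
    arb_poly_set_coeff_si(p, 2, 1);       /* p = 1 + x + x^2 */
    arb_poly_set_coeff_si(q, 0, -1);
    arb_poly_set_coeff_si(q, 3, 2);       /* q = -1 + 2x^3, i.e. a = 2, n = 3, c = -1 */

    arb_poly_compose(r, p, q, 64);        /* r = p(q(x)) = 1 - 2x^3 + 4x^6 */
    arb_poly_printd(r, 10);
    flint_printf("\n");

    arb_poly_clear(p);
    arb_poly_clear(q);
    arb_poly_clear(r);
    return 0;
}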
void
arb_acosh(arb_t z, const arb_t x, slong prec)
{
    if (arb_is_one(x))
    {
        arb_zero(z);
    }
    else
    {
        /* acosh(x) = log(x + sqrt(x^2 - 1)) */
        arb_t t;
        arb_init(t);

        arb_mul(t, x, x, prec + 4);
        arb_sub_ui(t, t, 1, prec + 4);
        arb_sqrt(t, t, prec + 4);
        arb_add(t, t, x, prec + 4);
        arb_log(z, t, prec);

        arb_clear(t);
    }
}
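/* Illustrative usage sketch, not part of the library source: computes an
   enclosure of acosh(2) at 64-bit precision. The driver and the test value
   are our own; the functions called are standard Arb API. */
#include "arb.h"

int main(void)
{
    arb_t x, y;

    arb_init(x);
    arb_init(y);

    arb_set_ui(x, 2);        /* x = 2 */
    arb_acosh(y, x, 64);     /* y = acosh(2) = log(2 + sqrt(3)) */
    arb_printd(y, 15);       /* prints an enclosure of about 1.31695789692482 */
    flint_printf("\n");

    arb_clear(x);
    arb_clear(y);
    return 0;
}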
/* Sets P and Q such that the sum of the first n terms of the
   hypergeometric series equals P/Q. */
void
arb_hypgeom_sum(arb_t P, arb_t Q, const hypgeom_t hyp, long n, long prec)
{
    if (n < 1)
    {
        arb_zero(P);
        arb_one(Q);
    }
    else
    {
        arb_t B, T;
        arb_init(B);
        arb_init(T);

        bsplit_recursive_arb(P, Q, B, T, hyp, 0, n, 0, prec);

        if (!arb_is_one(B))
            arb_mul(Q, Q, B, prec);
        arb_swap(P, T);

        arb_clear(B);
        arb_clear(T);
    }
}
/* In-place Taylor shift poly(x) -> poly(x + c) using Horner's rule. */
void
_arb_poly_taylor_shift_horner(arb_ptr poly, const arb_t c, slong n, slong prec)
{
    slong i, j;

    if (arb_is_one(c))
    {
        for (i = n - 2; i >= 0; i--)
            for (j = i; j < n - 1; j++)
                arb_add(poly + j, poly + j, poly + j + 1, prec);
    }
    else if (arb_equal_si(c, -1))
    {
        for (i = n - 2; i >= 0; i--)
            for (j = i; j < n - 1; j++)
                arb_sub(poly + j, poly + j, poly + j + 1, prec);
    }
    else if (!arb_is_zero(c))
    {
        for (i = n - 2; i >= 0; i--)
            for (j = i; j < n - 1; j++)
                arb_addmul(poly + j, poly + j + 1, c, prec);
    }
}
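/* Illustrative usage sketch, not part of the library source: shifts
   1 + 2x + x^2 = (1 + x)^2 by c = 1 in place, giving 4 + 4x + x^2 = (2 + x)^2.
   The driver is our own and assumes the underscore-level function is visible
   through arb_poly.h. */
#include "arb_poly.h"

int main(void)
{
    slong i;
    arb_ptr poly = _arb_vec_init(3);
    arb_t c;

    arb_init(c);
    arb_one(poly);
    arb_set_ui(poly + 1, 2);
    arb_one(poly + 2);                               /* poly = 1 + 2x + x^2 */
    arb_one(c);

    _arb_poly_taylor_shift_horner(poly, c, 3, 64);   /* poly(x) := poly(x + 1) */

    for (i = 0; i < 3; i++)
    {
        arb_printd(poly + i, 10);                    /* 4, 4, 1 */
        flint_printf("\n");
    }

    _arb_vec_clear(poly, 3);
    arb_clear(c);
    return 0;
}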
void
_arb_poly_exp_series(arb_ptr f, arb_srcptr h, slong hlen, slong n, slong prec)
{
    hlen = FLINT_MIN(hlen, n);

    if (hlen == 1)
    {
        arb_exp(f, h, prec);
        _arb_vec_zero(f + 1, n - 1);
    }
    else if (n == 2)
    {
        arb_exp(f, h, prec);
        arb_mul(f + 1, f, h + 1, prec);  /* safe since hlen >= 2 */
    }
    else if (_arb_vec_is_zero(h + 1, hlen - 2))  /* h = a + bx^d */
    {
        slong i, j, d = hlen - 1;
        arb_t t;

        arb_init(t);
        arb_set(t, h + d);
        arb_exp(f, h, prec);

        for (i = 1, j = d; j < n; j += d, i++)
        {
            arb_mul(f + j, f + j - d, t, prec);
            arb_div_ui(f + j, f + j, i, prec);
            _arb_vec_zero(f + j - d + 1, hlen - 2);
        }

        _arb_vec_zero(f + j - d + 1, n - (j - d + 1));
        arb_clear(t);
    }
    else if (hlen <= arb_poly_newton_exp_cutoff)
    {
        _arb_poly_exp_series_basecase(f, h, hlen, n, prec);
    }
    else
    {
        arb_ptr g, t;
        arb_t u;
        int fix;

        g = _arb_vec_init((n + 1) / 2);
        fix = (hlen < n || h == f || !arb_is_zero(h));

        if (fix)
        {
            t = _arb_vec_init(n);
            _arb_vec_set(t + 1, h + 1, hlen - 1);
        }
        else
            t = (arb_ptr) h;

        arb_init(u);
        arb_exp(u, h, prec);

        _arb_poly_exp_series_newton(f, g, t, n, prec, 0, arb_poly_newton_exp_cutoff);

        if (!arb_is_one(u))
            _arb_vec_scalar_mul(f, f, n, u, prec);

        _arb_vec_clear(g, (n + 1) / 2);
        if (fix)
            _arb_vec_clear(t, n);
        arb_clear(u);
    }
}
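/* Illustrative usage sketch, not part of the library source: computes the
   power series exp(x) to order O(x^5) via the public wrapper
   arb_poly_exp_series. The driver code is our own. */
#include "arb_poly.h"

int main(void)
{
    arb_poly_t h, f;

    arb_poly_init(h);
    arb_poly_init(f);

    arb_poly_set_coeff_si(h, 1, 1);       /* h = x */
    arb_poly_exp_series(f, h, 5, 64);     /* f = 1 + x + x^2/2 + x^3/6 + x^4/24 + O(x^5) */
    arb_poly_printd(f, 10);
    flint_printf("\n");

    arb_poly_clear(h);
    arb_poly_clear(f);
    return 0;
}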
/* Binary splitting over [a, b); short ranges are delegated to the exact
   fmpz basecase. */
static void
bsplit_recursive_arb(arb_t P, arb_t Q, arb_t B, arb_t T,
    const hypgeom_t hyp, long a, long b, int cont, long prec)
{
    if (b - a < 4)
    {
        fmpz_t PP, QQ, BB, TT;

        fmpz_init(PP);
        fmpz_init(QQ);
        fmpz_init(BB);
        fmpz_init(TT);

        bsplit_recursive_fmpz(PP, QQ, BB, TT, hyp, a, b, cont);

        arb_set_fmpz(P, PP);
        arb_set_fmpz(Q, QQ);
        arb_set_fmpz(B, BB);
        arb_set_fmpz(T, TT);

        fmpz_clear(PP);
        fmpz_clear(QQ);
        fmpz_clear(BB);
        fmpz_clear(TT);
    }
    else
    {
        long m;
        arb_t P2, Q2, B2, T2;

        m = (a + b) / 2;

        arb_init(P2);
        arb_init(Q2);
        arb_init(B2);
        arb_init(T2);

        bsplit_recursive_arb(P, Q, B, T, hyp, a, m, 1, prec);
        bsplit_recursive_arb(P2, Q2, B2, T2, hyp, m, b, 1, prec);

        if (arb_is_one(B) && arb_is_one(B2))
        {
            arb_mul(T, T, Q2, prec);
            arb_addmul(T, P, T2, prec);
        }
        else
        {
            arb_mul(T, T, B2, prec);
            arb_mul(T, T, Q2, prec);
            arb_mul(T2, T2, B, prec);
            arb_addmul(T, P, T2, prec);
        }

        arb_mul(B, B, B2, prec);
        arb_mul(Q, Q, Q2, prec);
        if (cont)
            arb_mul(P, P, P2, prec);

        arb_clear(P2);
        arb_clear(Q2);
        arb_clear(B2);
        arb_clear(T2);
    }
}
void
_arb_poly_inv_series(arb_ptr Qinv, arb_srcptr Q, slong Qlen, slong len, slong prec)
{
    Qlen = FLINT_MIN(Qlen, len);

    arb_inv(Qinv, Q, prec);

    if (Qlen == 1)
    {
        _arb_vec_zero(Qinv + 1, len - 1);
    }
    else if (len == 2)
    {
        arb_mul(Qinv + 1, Qinv, Qinv, prec);
        arb_mul(Qinv + 1, Qinv + 1, Q + 1, prec);
        arb_neg(Qinv + 1, Qinv + 1);
    }
    else
    {
        slong i, j, blen;

        /* The basecase algorithm is faster for much larger Qlen or len than
           this, but unfortunately also much less numerically stable. */
        if (Qlen == 2 || len <= 8)
            blen = len;
        else
            blen = FLINT_MIN(len, 4);

        for (i = 1; i < blen; i++)
        {
            arb_mul(Qinv + i, Q + 1, Qinv + i - 1, prec);

            for (j = 2; j < FLINT_MIN(i + 1, Qlen); j++)
                arb_addmul(Qinv + i, Q + j, Qinv + i - j, prec);

            if (!arb_is_one(Qinv))
                arb_mul(Qinv + i, Qinv + i, Qinv, prec);

            arb_neg(Qinv + i, Qinv + i);
        }

        if (len > blen)
        {
            slong Qnlen, Wlen, W2len;
            arb_ptr W;

            W = _arb_vec_init(len);

            NEWTON_INIT(blen, len)
            NEWTON_LOOP(m, n)
            Qnlen = FLINT_MIN(Qlen, n);
            Wlen = FLINT_MIN(Qnlen + m - 1, n);
            W2len = Wlen - m;
            MULLOW(W, Q, Qnlen, Qinv, m, Wlen, prec);
            MULLOW(Qinv + m, Qinv, m, W + m, W2len, n - m, prec);
            _arb_vec_neg(Qinv + m, Qinv + m, n - m);
            NEWTON_END_LOOP
            NEWTON_END

            _arb_vec_clear(W, len);
        }
    }
}
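/* Illustrative usage sketch, not part of the library source: inverts
   Q = 1 - x as a power series, giving 1 + x + x^2 + ... + O(x^6), via the
   public wrapper arb_poly_inv_series. The driver code is our own. */
#include "arb_poly.h"

int main(void)
{
    arb_poly_t Q, Qinv;

    arb_poly_init(Q);
    arb_poly_init(Qinv);

    arb_poly_set_coeff_si(Q, 0, 1);
    arb_poly_set_coeff_si(Q, 1, -1);      /* Q = 1 - x */
    arb_poly_inv_series(Qinv, Q, 6, 64);  /* Qinv = 1/(1 - x) mod x^6 */
    arb_poly_printd(Qinv, 10);
    flint_printf("\n");

    arb_poly_clear(Q);
    arb_poly_clear(Qinv);
    return 0;
}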
void
_arb_poly_zeta_series(arb_ptr res, arb_srcptr h, long hlen,
    const arb_t a, int deflate, long len, long prec)
{
    long i;
    acb_t cs, ca;
    acb_ptr z;
    arb_ptr t, u;

    if (arb_contains_nonpositive(a))
    {
        _arb_vec_indeterminate(res, len);
        return;
    }

    hlen = FLINT_MIN(hlen, len);

    z = _acb_vec_init(len);
    t = _arb_vec_init(len);
    u = _arb_vec_init(len);
    acb_init(cs);
    acb_init(ca);

    /* use reflection formula */
    if (arf_sgn(arb_midref(h)) < 0 && arb_is_one(a))
    {
        /* zeta(s) = (2*pi)**s * sin(pi*s/2) / pi * gamma(1-s) * zeta(1-s) */
        arb_t pi;
        arb_ptr f, s1, s2, s3, s4;

        arb_init(pi);
        f = _arb_vec_init(2);
        s1 = _arb_vec_init(len);
        s2 = _arb_vec_init(len);
        s3 = _arb_vec_init(len);
        s4 = _arb_vec_init(len);

        arb_const_pi(pi, prec);

        /* s1 = (2*pi)**s */
        arb_mul_2exp_si(pi, pi, 1);
        _arb_poly_pow_cpx(s1, pi, h, len, prec);
        arb_mul_2exp_si(pi, pi, -1);

        /* s2 = sin(pi*s/2) / pi */
        arb_set(f, h);
        arb_one(f + 1);
        arb_mul_2exp_si(f, f, -1);
        arb_mul_2exp_si(f + 1, f + 1, -1);
        _arb_poly_sin_pi_series(s2, f, 2, len, prec);
        _arb_vec_scalar_div(s2, s2, len, pi, prec);

        /* s3 = gamma(1-s) */
        arb_sub_ui(f, h, 1, prec);
        arb_neg(f, f);
        arb_set_si(f + 1, -1);
        _arb_poly_gamma_series(s3, f, 2, len, prec);

        /* s4 = zeta(1-s) */
        arb_sub_ui(f, h, 1, prec);
        arb_neg(f, f);
        acb_set_arb(cs, f);
        acb_one(ca);
        _acb_poly_zeta_cpx_series(z, cs, ca, 0, len, prec);
        for (i = 0; i < len; i++)
            arb_set(s4 + i, acb_realref(z + i));
        for (i = 1; i < len; i += 2)
            arb_neg(s4 + i, s4 + i);

        _arb_poly_mullow(u, s1, len, s2, len, len, prec);
        _arb_poly_mullow(s1, s3, len, s4, len, len, prec);
        _arb_poly_mullow(t, u, len, s1, len, len, prec);

        /* add 1/(1-(s+t)) = 1/(1-s) + t/(1-s)^2 + ... */
        if (deflate)
        {
            arb_sub_ui(u, h, 1, prec);
            arb_neg(u, u);
            arb_inv(u, u, prec);
            for (i = 1; i < len; i++)
                arb_mul(u + i, u + i - 1, u, prec);
            _arb_vec_add(t, t, u, len, prec);
        }

        arb_clear(pi);
        _arb_vec_clear(f, 2);
        _arb_vec_clear(s1, len);
        _arb_vec_clear(s2, len);
        _arb_vec_clear(s3, len);
        _arb_vec_clear(s4, len);
    }
    else
    {
        acb_set_arb(cs, h);
        acb_set_arb(ca, a);
        _acb_poly_zeta_cpx_series(z, cs, ca, deflate, len, prec);
        for (i = 0; i < len; i++)
            arb_set(t + i, acb_realref(z + i));
    }

    /* compose with nonconstant part */
    arb_zero(u);
    _arb_vec_set(u + 1, h + 1, hlen - 1);
    _arb_poly_compose_series(res, t, len, u, hlen, len, prec);

    _acb_vec_clear(z, len);
    _arb_vec_clear(t, len);
    _arb_vec_clear(u, len);
    acb_clear(cs);
    acb_clear(ca);
}
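/* Illustrative usage sketch, not part of the library source: expands the
   Hurwitz zeta function zeta(s, 1) = zeta(s) around s = 2 to order O(t^3),
   via the public wrapper arb_poly_zeta_series. The driver code is our own. */
#include "arb_poly.h"

int main(void)
{
    arb_poly_t s, res;
    arb_t a;

    arb_poly_init(s);
    arb_poly_init(res);
    arb_init(a);

    arb_one(a);                                 /* a = 1: ordinary zeta */
    arb_poly_set_coeff_si(s, 0, 2);             /* expansion point s = 2 */
    arb_poly_set_coeff_si(s, 1, 1);             /* s = 2 + t */
    arb_poly_zeta_series(res, s, a, 0, 3, 64);  /* zeta(2) + zeta'(2) t + (zeta''(2)/2) t^2 */
    arb_poly_printd(res, 10);
    flint_printf("\n");

    arb_poly_clear(s);
    arb_poly_clear(res);
    arb_clear(a);
    return 0;
}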