/* Sets s and c to the hyperbolic sine and cosine of the power series h
   (hlen coefficients), truncated to length len, using the exponential:
   with E = exp(h), sinh(h) = (E - 1/E)/2 and cosh(h) = (E + 1/E)/2.

   The constant term h[0] is split off and handled with a direct
   arb_sinh_cosh evaluation; the series part is exponentiated with zero
   constant term, and the results are recombined at the end via the
   addition formulas
       sinh(h0 + h1) = cosh(h0) sinh(h1) + sinh(h0) cosh(h1)
       cosh(h0 + h1) = cosh(h0) cosh(h1) + sinh(h0) sinh(h1)
   (the recombination is skipped when sinh(h0) is exactly zero, in which
   case cosh(h0) = 1 and s, c are already correct).

   Assumes len >= 1 and hlen >= 1; s and c must each have room for len
   coefficients. NOTE(review): s and c are presumably not allowed to
   alias h, since h + 1 is read after s/c have been written in the
   hlen == 1 branch's callers' convention — confirm against the arb
   aliasing rules for _arb_poly functions. Scratch usage: t = exp(h1),
   u = exp(-h1), v spare; all three live in one allocation of 3*len. */
void _arb_poly_sinh_cosh_series_exponential(arb_ptr s, arb_ptr c, const arb_srcptr h, slong hlen, slong len, slong prec) { arb_ptr t, u, v; arb_t s0, c0; hlen = FLINT_MIN(hlen, len); if (hlen == 1) { arb_sinh_cosh(s, c, h, prec); _arb_vec_zero(s + 1, len - 1); _arb_vec_zero(c + 1, len - 1); return; } arb_init(s0); arb_init(c0); t = _arb_vec_init(3 * len); u = t + len; v = u + len; arb_sinh_cosh(s0, c0, h, prec); _arb_vec_set(t + 1, h + 1, hlen - 1); _arb_poly_exp_series(t, t, len, len, prec); /* todo: part of the inverse could be avoided since exp computes it internally to half the length */ _arb_poly_inv_series(u, t, len, len, prec); /* hyperbolic sine */ _arb_vec_sub(s, t, u, len, prec); _arb_vec_scalar_mul_2exp_si(s, s, len, -1); /* hyperbolic cosine */ _arb_vec_add(c, t, u, len, prec); _arb_vec_scalar_mul_2exp_si(c, c, len, -1); /* sinh(h0 + h1) = cosh(h0) sinh(h1) + sinh(h0) cosh(h1) cosh(h0 + h1) = cosh(h0) cosh(h1) + sinh(h0) sinh(h1) */ if (!arb_is_zero(s0)) { _arb_vec_scalar_mul(t, s, len, c0, prec); _arb_vec_scalar_mul(u, c, len, s0, prec); _arb_vec_scalar_mul(v, s, len, s0, prec); _arb_vec_add(s, t, u, len, prec); _arb_vec_scalar_mul(t, c, len, c0, prec); _arb_vec_add(c, t, v, len, prec); } _arb_vec_clear(t, 3 * len); arb_clear(s0); arb_clear(c0); }
/* Sets f = exp(h) + O(x^len) by Newton iteration, doubling the accurate
   length on each pass from a basecase of length `cutoff`.

   With inverse = 1 it simultaneously produces g = exp(-h) to the full
   length len; with inverse = 0, g is used purely as scratch space and is
   only valid to length (len+1)/2 on return (the half-length inverse the
   iteration maintains internally).

   Invariant inside the loop (see the comment before NEWTON_BASECASE):
   at the top of each NEWTON_LOOP(m, n) step, f = exp(h) + O(x^m) and
   g = exp(-h) + O(x^m2) with m2 = (m+1)/2. The step:
     1. extends g to O(x^m) via one inverse-Newton update
        (g := g - g*(f*g - 1), computed piecewise on the high half);
     2. forms U = h' + g*(f' - f*h') + O(x^(n-1)), the correction to the
        logarithmic derivative (l = m - 1 shifts indices because the
        derivative drops the constant term);
     3. updates f := f + f*(h - int U) + O(x^n);
     4. on the final step only (n == len), and only if the caller asked
        for it, extends g to the full length.
   The inline "should be mulmid" / "should skip low terms" comments are
   author notes about further optimizations, not correctness issues.

   Scratch: one allocation of 3*len split into T, U, hprime.
   hprime holds h' padded with a zero in the top coefficient. */
static void _arb_poly_exp_series_newton(arb_ptr f, arb_ptr g, arb_srcptr h, slong len, slong prec, int inverse, slong cutoff) { slong alloc; arb_ptr T, U, hprime; alloc = 3 * len; T = _arb_vec_init(alloc); U = T + len; hprime = U + len; _arb_poly_derivative(hprime, h, len, prec); arb_zero(hprime + len - 1); NEWTON_INIT(cutoff, len) /* f := exp(h) + O(x^m), g := exp(-h) + O(x^m2) */ NEWTON_BASECASE(n) _arb_poly_exp_series_basecase(f, h, n, n, prec); _arb_poly_inv_series(g, f, (n + 1) / 2, (n + 1) / 2, prec); NEWTON_END_BASECASE /* extend from length m to length n */ NEWTON_LOOP(m, n) slong m2 = (m + 1) / 2; slong l = m - 1; /* shifted for derivative */ /* g := exp(-h) + O(x^m) */ _arb_poly_mullow(T, f, m, g, m2, m, prec); _arb_poly_mullow(g + m2, g, m2, T + m2, m - m2, m - m2, prec); _arb_vec_neg(g + m2, g + m2, m - m2); /* U := h' + g (f' - f h') + O(x^(n-1)) Note: should replace h' by h' mod x^(m-1) */ _arb_vec_zero(f + m, n - m); _arb_poly_mullow(T, f, n, hprime, n, n, prec); /* should be mulmid */ _arb_poly_derivative(U, f, n, prec); arb_zero(U + n - 1); /* should skip low terms */ _arb_vec_sub(U + l, U + l, T + l, n - l, prec); _arb_poly_mullow(T + l, g, n - m, U + l, n - m, n - m, prec); _arb_vec_add(U + l, hprime + l, T + l, n - m, prec); /* f := f + f * (h - int U) + O(x^n) = exp(h) + O(x^n) */ _arb_poly_integral(U, U, n, prec); /* should skip low terms */ _arb_vec_sub(U + m, h + m, U + m, n - m, prec); _arb_poly_mullow(f + m, f, n - m, U + m, n - m, n - m, prec); /* g := exp(-h) + O(x^n) */ /* not needed if we only want exp(x) */ if (n == len && inverse) { _arb_poly_mullow(T, f, n, g, m, n, prec); _arb_poly_mullow(g + m, g, m, T + m, n - m, n - m, prec); _arb_vec_neg(g + m, g + m, n - m); } NEWTON_END_LOOP NEWTON_END _arb_vec_clear(T, alloc); }
/* Computes the first len Keiper-Li coefficients lambda_n into z, at
   working precision prec, printing per-stage timings and memory usage
   (this reads like example/profiling code, not library code).

   Pipeline, following the standard expression for the Li coefficients
   via the completed zeta function xi:
     1. v = -zeta(s) expanded around s = 0 to length len
        (the series argument is t = 0 + 1*s, i.e. the identity, with
        Hurwitz parameter a = 1 and no deflation);
     2. t = log(v);
     3. add log(gamma(1 + s/2)), expanded from the length-2 argument
        u = 1 + s/2;
     4. subtract (1/2) log(pi) * s;
     5. add log(1 - s);
     6. apply the binomial transform to t[1..], with alternating signs
        applied first (the negation of the odd-index pattern is folded
        into negating all of t + 1), storing the result in z + 1;
        z[0] is just t[0].

   Scratch vectors t, u, v each of length len; u is reused as a small
   length-2 polynomial argument in steps 1, 3 and 5. */
void keiper_li_series(arb_ptr z, slong len, slong prec) { arb_ptr t, u, v; t = _arb_vec_init(len); u = _arb_vec_init(len); v = _arb_vec_init(len); /* -zeta(s) */ flint_printf("zeta: "); TIMEIT_ONCE_START arb_zero(t + 0); arb_one(t + 1); arb_one(u); _arb_poly_zeta_series(v, t, 2, u, 0, len, prec); _arb_vec_neg(v, v, len); TIMEIT_ONCE_STOP SHOW_MEMORY_USAGE /* logarithm */ flint_printf("log: "); TIMEIT_ONCE_START _arb_poly_log_series(t, v, len, len, prec); TIMEIT_ONCE_STOP /* add log(gamma(1+s/2)) */ flint_printf("gamma: "); TIMEIT_ONCE_START arb_one(u); arb_one(u + 1); arb_mul_2exp_si(u + 1, u + 1, -1); _arb_poly_lgamma_series(v, u, 2, len, prec); _arb_vec_add(t, t, v, len, prec); TIMEIT_ONCE_STOP /* subtract 0.5 s log(pi) */ arb_const_pi(u, prec); arb_log(u, u, prec); arb_mul_2exp_si(u, u, -1); arb_sub(t + 1, t + 1, u, prec); /* add log(1-s) */ arb_one(u); arb_set_si(u + 1, -1); _arb_poly_log_series(v, u, 2, len, prec); _arb_vec_add(t, t, v, len, prec); /* binomial transform */ flint_printf("binomial transform: "); TIMEIT_ONCE_START arb_set(z, t); _arb_vec_neg(t + 1, t + 1, len - 1); _arb_poly_binomial_transform(z + 1, t + 1, len - 1, len - 1, prec); TIMEIT_ONCE_STOP _arb_vec_clear(t, len); _arb_vec_clear(u, len); _arb_vec_clear(v, len); }
/* Complex polynomial multiplication res = poly1 * poly2 truncated to
   length n, using Gauss's trick to get away with three real polynomial
   multiplications instead of four:
     (a+bi)(c+di): with t = a+b, u = c+d,
       real = ac - bd
       imag = (a+b)(c+d) - ac - bd
   so only the products t*u, a*c, b*d are needed.

   The arrays a..f are SHALLOW copies: each arb_struct is copied by
   value out of (and back into) the acb components of poly1, poly2 and
   res, so they alias the caller's data and are deliberately never
   arb_init'ed or arb_clear'ed — w is released with a plain flint_free.
   e and f alias res, which is why the three mullow results go through
   the separately allocated scratch vectors t, u, v before the
   subtractions write into e/f.

   NOTE(review): because e/f alias res, this presumably requires res
   not to alias poly1 or poly2 — confirm against the calling
   conventions of the mullow dispatch that selects this kernel. */
void _acb_poly_mullow_transpose_gauss(acb_ptr res, acb_srcptr poly1, slong len1, acb_srcptr poly2, slong len2, slong n, slong prec) { arb_ptr a, b, c, d, e, f, w; arb_ptr t, u, v; slong i; len1 = FLINT_MIN(len1, n); len2 = FLINT_MIN(len2, n); w = flint_malloc(sizeof(arb_struct) * (2 * (len1 + len2 + n))); a = w; b = a + len1; c = b + len1; d = c + len2; e = d + len2; f = e + n; t = _arb_vec_init(n); u = _arb_vec_init(n); v = _arb_vec_init(n); for (i = 0; i < len1; i++) { a[i] = *acb_realref(poly1 + i); b[i] = *acb_imagref(poly1 + i); } for (i = 0; i < len2; i++) { c[i] = *acb_realref(poly2 + i); d[i] = *acb_imagref(poly2 + i); } for (i = 0; i < n; i++) { e[i] = *acb_realref(res + i); f[i] = *acb_imagref(res + i); } _arb_vec_add(t, a, b, len1, prec); _arb_vec_add(u, c, d, len2, prec); _arb_poly_mullow(v, t, len1, u, len2, n, prec); _arb_poly_mullow(t, a, len1, c, len2, n, prec); _arb_poly_mullow(u, b, len1, d, len2, n, prec); _arb_vec_sub(e, t, u, n, prec); _arb_vec_sub(f, v, t, n, prec); _arb_vec_sub(f, f, u, n, prec); for (i = 0; i < n; i++) { *acb_realref(res + i) = e[i]; *acb_imagref(res + i) = f[i]; } _arb_vec_clear(t, n); _arb_vec_clear(u, n); _arb_vec_clear(v, n); flint_free(w); }
/* Sets res to the (Hurwitz) zeta function zeta(s, a) composed with the
   power series argument h (hlen coefficients), truncated to length len,
   at precision prec. If deflate is nonzero, the pole term is removed,
   i.e. zeta(s, a) - 1/(s-1) is computed instead.

   Only real a > 0 is handled: if a possibly touches (-inf, 0], res is
   set to len indeterminate (NaN) coefficients and the function returns.

   Strategy: the series is first expanded around the constant term
   s0 = h[0] (via the complex worker _acb_poly_zeta_cpx_series, taking
   real parts afterwards), then composed with the nonconstant part of h.
   When a = 1 and the midpoint of s0 is negative, the functional
   equation is used instead:

     zeta(s) = (2*pi)^s * sin(pi*s/2) / pi * gamma(1-s) * zeta(1-s)

   with each factor expanded to length len and multiplied together.
   In that branch, derivatives of the factors in 1-s pick up
   alternating signs from the inner derivative d/ds (1-s) = -1, hence
   the negation of the odd-index coefficients of s4. The deflation term
   1/(1-(s+t)) = 1/(1-s) + t/(1-s)^2 + ... is added as a geometric
   series in u.

   BUG FIX: the original cleanup called acb_init(cs); acb_init(ca);
   a second time instead of acb_clear, leaking both variables (and
   re-initializing already-initialized objects). They are now cleared,
   matching the acb_init pair at the top of the function. */
void
_arb_poly_zeta_series(arb_ptr res, arb_srcptr h, long hlen, const arb_t a,
    int deflate, long len, long prec)
{
    long i;
    acb_t cs, ca;
    acb_ptr z;
    arb_ptr t, u;

    /* zeta(s, a) is only evaluated here for real a > 0 */
    if (arb_contains_nonpositive(a))
    {
        _arb_vec_indeterminate(res, len);
        return;
    }

    hlen = FLINT_MIN(hlen, len);

    z = _acb_vec_init(len);
    t = _arb_vec_init(len);
    u = _arb_vec_init(len);
    acb_init(cs);
    acb_init(ca);

    /* use reflection formula */
    if (arf_sgn(arb_midref(h)) < 0 && arb_is_one(a))
    {
        /* zeta(s) = (2*pi)**s * sin(pi*s/2) / pi * gamma(1-s) * zeta(1-s) */
        arb_t pi;
        arb_ptr f, s1, s2, s3, s4;

        arb_init(pi);
        f = _arb_vec_init(2);
        s1 = _arb_vec_init(len);
        s2 = _arb_vec_init(len);
        s3 = _arb_vec_init(len);
        s4 = _arb_vec_init(len);

        arb_const_pi(pi, prec);

        /* s1 = (2*pi)**s */
        arb_mul_2exp_si(pi, pi, 1);
        _arb_poly_pow_cpx(s1, pi, h, len, prec);
        arb_mul_2exp_si(pi, pi, -1);

        /* s2 = sin(pi*s/2) / pi, expanded from the length-2 argument
           f = (h[0] + x) / 2 */
        arb_set(f, h);
        arb_one(f + 1);
        arb_mul_2exp_si(f, f, -1);
        arb_mul_2exp_si(f + 1, f + 1, -1);
        _arb_poly_sin_pi_series(s2, f, 2, len, prec);
        _arb_vec_scalar_div(s2, s2, len, pi, prec);

        /* s3 = gamma(1-s), expanded from f = (1 - h[0]) - x */
        arb_sub_ui(f, h, 1, prec);
        arb_neg(f, f);
        arb_set_si(f + 1, -1);
        _arb_poly_gamma_series(s3, f, 2, len, prec);

        /* s4 = zeta(1-s) */
        arb_sub_ui(f, h, 1, prec);
        arb_neg(f, f);
        acb_set_arb(cs, f);
        acb_one(ca);
        _acb_poly_zeta_cpx_series(z, cs, ca, 0, len, prec);
        for (i = 0; i < len; i++)
            arb_set(s4 + i, acb_realref(z + i));
        /* alternate signs: odd derivatives of zeta(1-s) in s flip sign
           because d/ds (1-s) = -1 */
        for (i = 1; i < len; i += 2)
            arb_neg(s4 + i, s4 + i);

        _arb_poly_mullow(u, s1, len, s2, len, len, prec);
        _arb_poly_mullow(s1, s3, len, s4, len, len, prec);
        _arb_poly_mullow(t, u, len, s1, len, len, prec);

        /* add 1/(1-(s+t)) = 1/(1-s) + t/(1-s)^2 + ... */
        if (deflate)
        {
            arb_sub_ui(u, h, 1, prec);
            arb_neg(u, u);
            arb_inv(u, u, prec);
            for (i = 1; i < len; i++)
                arb_mul(u + i, u + i - 1, u, prec);
            _arb_vec_add(t, t, u, len, prec);
        }

        arb_clear(pi);
        _arb_vec_clear(f, 2);
        _arb_vec_clear(s1, len);
        _arb_vec_clear(s2, len);
        _arb_vec_clear(s3, len);
        _arb_vec_clear(s4, len);
    }
    else
    {
        acb_set_arb(cs, h);
        acb_set_arb(ca, a);
        _acb_poly_zeta_cpx_series(z, cs, ca, deflate, len, prec);
        for (i = 0; i < len; i++)
            arb_set(t + i, acb_realref(z + i));
    }

    /* compose with nonconstant part */
    arb_zero(u);
    _arb_vec_set(u + 1, h + 1, hlen - 1);
    _arb_poly_compose_series(res, t, len, u, hlen, len, prec);

    _acb_vec_clear(z, len);
    _arb_vec_clear(t, len);
    _arb_vec_clear(u, len);
    acb_clear(cs);   /* was acb_init: leaked cs and ca */
    acb_clear(ca);
}
/* Sets res to log(gamma(h)), where h is a power series with hlen
   coefficients, truncated to length len, at precision prec.

   Only defined here for series with positive constant term: if h[0] is
   not provably positive, res is set to indeterminate coefficients.
   Working precision wp = prec + log2(prec) guard bits.

   Three strategies for the expansion around the constant term h[0]:
     1. h[0] is a small exact integer r (|r| < prec/2): use the
        precomputed series of lgamma at 1 plus the log rising factorial
        log((1)_{r-1}) to shift from 1 to r; nonpositive integers are
        poles, so r <= 0 yields indeterminate output (via goto cleanup);
     2. len <= 2: evaluate lgamma and, if needed, digamma directly;
     3. otherwise: the Stirling series, with an argument shift r chosen
        by arb_gamma_stirling_choose_param and compensated afterwards by
        subtracting log((h[0])_r).
   Finally the length-len expansion u in the variable (h - h[0]) is
   composed with the nonconstant part of h. */
void _arb_poly_lgamma_series(arb_ptr res, arb_srcptr h, slong hlen, slong len, slong prec) { int reflect; slong r, n, wp; arb_t zr; arb_ptr t, u; if (!arb_is_positive(h)) { _arb_vec_indeterminate(res, len); return; } hlen = FLINT_MIN(hlen, len); wp = prec + FLINT_BIT_COUNT(prec); t = _arb_vec_init(len); u = _arb_vec_init(len); arb_init(zr); /* use zeta values at small integers */ if (arb_is_int(h) && (arf_cmpabs_ui(arb_midref(h), prec / 2) < 0)) { r = arf_get_si(arb_midref(h), ARF_RND_DOWN); if (r <= 0) { _arb_vec_indeterminate(res, len); goto cleanup; } else { _arb_poly_lgamma_series_at_one(u, len, wp); if (r != 1) { arb_one(zr); _log_rising_ui_series(t, zr, r - 1, len, wp); _arb_vec_add(u, u, t, len, wp); } } } else if (len <= 2) { arb_lgamma(u, h, wp); if (len == 2) arb_digamma(u + 1, h, wp); } else { /* otherwise use Stirling series */ arb_gamma_stirling_choose_param(&reflect, &r, &n, h, 0, 0, wp); arb_add_ui(zr, h, r, wp); _arb_poly_gamma_stirling_eval(u, zr, n, len, wp); if (r != 0) { _log_rising_ui_series(t, h, r, len, wp); _arb_vec_sub(u, u, t, len, wp); } } /* compose with nonconstant part */ arb_zero(t); _arb_vec_set(t + 1, h + 1, hlen - 1); _arb_poly_compose_series(res, u, len, t, hlen, len, prec); cleanup: arb_clear(zr); _arb_vec_clear(t, len); _arb_vec_clear(u, len); }
/* Sets s and c to the sine and cosine of the power series h (hlen
   coefficients), truncated to length len, via the tangent half-angle
   substitution: with t = tan(x/2),
       sin(x) = 2t/(1+t^2),    cos(x) = (1-t^2)/(1+t^2).
   If times_pi is nonzero, the argument is pi*h instead of h.

   The constant term h[0] is split off and evaluated directly with
   arb_sin_cos(_pi); the tangent substitution is applied only to the
   nonconstant part h1 = h - h[0] (so tan_series sees a series with
   zero constant term), and the two are recombined with the angle
   addition formulas
       sin(h0 + h1) = cos(h0) sin(h1) + sin(h0) cos(h1)
       cos(h0 + h1) = cos(h0) cos(h1) - sin(h0) sin(h1)
   (skipped when sin(h0) is exactly zero, since then cos(h0) = 1).

   Cosine trick: v holds 1 + t^2, so subtracting 2 and negating gives
   1 - t^2 without an extra multiplication.

   Scratch: one allocation of 3*len split into t, u, v; t is reused,
   first for pi (times_pi scaling), then tan(h1/2), then a product. */
void _arb_poly_sin_cos_series_tangent(arb_ptr s, arb_ptr c, arb_srcptr h, slong hlen, slong len, slong prec, int times_pi) { arb_ptr t, u, v; arb_t s0, c0; hlen = FLINT_MIN(hlen, len); if (hlen == 1) { if (times_pi) arb_sin_cos_pi(s, c, h, prec); else arb_sin_cos(s, c, h, prec); _arb_vec_zero(s + 1, len - 1); _arb_vec_zero(c + 1, len - 1); return; } /* sin(x) = 2*tan(x/2)/(1+tan(x/2)^2) cos(x) = (1-tan(x/2)^2)/(1+tan(x/2)^2) */ arb_init(s0); arb_init(c0); t = _arb_vec_init(3 * len); u = t + len; v = u + len; /* sin, cos of h0 */ if (times_pi) arb_sin_cos_pi(s0, c0, h, prec); else arb_sin_cos(s0, c0, h, prec); /* t = tan((h-h0)/2) */ arb_zero(u); _arb_vec_scalar_mul_2exp_si(u + 1, h + 1, hlen - 1, -1); if (times_pi) { arb_const_pi(t, prec); _arb_vec_scalar_mul(u + 1, u + 1, hlen - 1, t, prec); } _arb_poly_tan_series(t, u, hlen, len, prec); /* v = 1 + t^2 */ _arb_poly_mullow(v, t, len, t, len, len, prec); arb_add_ui(v, v, 1, prec); /* u = 1/(1+t^2) */ _arb_poly_inv_series(u, v, len, len, prec); /* sine */ _arb_poly_mullow(s, t, len, u, len, len, prec); _arb_vec_scalar_mul_2exp_si(s, s, len, 1); /* cosine */ arb_sub_ui(v, v, 2, prec); _arb_vec_neg(v, v, len); _arb_poly_mullow(c, v, len, u, len, len, prec); /* sin(h0 + h1) = cos(h0) sin(h1) + sin(h0) cos(h1) cos(h0 + h1) = cos(h0) cos(h1) - sin(h0) sin(h1) */ if (!arb_is_zero(s0)) { _arb_vec_scalar_mul(t, s, len, c0, prec); _arb_vec_scalar_mul(u, c, len, s0, prec); _arb_vec_scalar_mul(v, s, len, s0, prec); _arb_vec_add(s, t, u, len, prec); _arb_vec_scalar_mul(t, c, len, c0, prec); _arb_vec_sub(c, t, v, len, prec); } _arb_vec_clear(t, 3 * len); arb_clear(s0); arb_clear(c0); }
/* Complex polynomial multiplication res = poly1 * poly2 truncated to
   length n, by the "transpose" layout: the real and imaginary parts of
   the interleaved acb arrays are gathered into separate contiguous arb
   vectors so real _arb_poly_mullow kernels can be used, computing
     (a+bi)(c+di) = (ac - bd) + (ad + bc)i
   with four real multiplications, or three when squaring (poly1 ==
   poly2 and len1 == len2), since then ad == bc and the imaginary part
   is simply 2*ad (handled by the mul_2exp shift).

   The arrays a..f are SHALLOW copies: each arb_struct is copied by
   value out of (and back into) the acb components of poly1, poly2 and
   res, so they alias the caller's data and are deliberately never
   arb_init'ed or arb_clear'ed — w is released with a plain flint_free.
   Only t is an owned scratch vector; e and f (aliasing res) receive
   the products directly, with t buffering the second term of each sum.

   NOTE(review): since e/f alias res and are written before b, c, d are
   last read, this presumably requires res not to alias poly1 or poly2
   — confirm against the dispatch that selects this kernel. */
void _acb_poly_mullow_transpose(acb_ptr res, acb_srcptr poly1, slong len1, acb_srcptr poly2, slong len2, slong n, slong prec) { arb_ptr a, b, c, d, e, f, w; arb_ptr t; slong i; len1 = FLINT_MIN(len1, n); len2 = FLINT_MIN(len2, n); w = flint_malloc(sizeof(arb_struct) * (2 * (len1 + len2 + n))); a = w; b = a + len1; c = b + len1; d = c + len2; e = d + len2; f = e + n; /* (e+fi) = (a+bi)(c+di) = (ac - bd) + (ad + bc)i */ t = _arb_vec_init(n); for (i = 0; i < len1; i++) { a[i] = *acb_realref(poly1 + i); b[i] = *acb_imagref(poly1 + i); } for (i = 0; i < len2; i++) { c[i] = *acb_realref(poly2 + i); d[i] = *acb_imagref(poly2 + i); } for (i = 0; i < n; i++) { e[i] = *acb_realref(res + i); f[i] = *acb_imagref(res + i); } _arb_poly_mullow(e, a, len1, c, len2, n, prec); _arb_poly_mullow(t, b, len1, d, len2, n, prec); _arb_vec_sub(e, e, t, n, prec); _arb_poly_mullow(f, a, len1, d, len2, n, prec); /* squaring */ if (poly1 == poly2 && len1 == len2) { _arb_vec_scalar_mul_2exp_si(f, f, n, 1); } else { _arb_poly_mullow(t, b, len1, c, len2, n, prec); _arb_vec_add(f, f, t, n, prec); } for (i = 0; i < n; i++) { *acb_realref(res + i) = e[i]; *acb_imagref(res + i) = f[i]; } _arb_vec_clear(t, n); flint_free(w); }