/* Sets res to the power series log(gamma(f(x))) truncated to length n.
   The zero polynomial (f->length == 0) has no well-defined log-gamma
   series, so the output coefficients are set to indeterminate in that
   case (likewise trivially when n == 0). */
void arb_poly_lgamma_series(arb_poly_t res, const arb_poly_t f, slong n, slong prec)
{
    /* Decide before touching res, in case res aliases f. */
    int input_is_zero = (n == 0 || f->length == 0);

    arb_poly_fit_length(res, n);

    if (input_is_zero)
    {
        _arb_vec_indeterminate(res->coeffs, n);
    }
    else
    {
        _arb_poly_lgamma_series(res->coeffs, f->coeffs, f->length, n, prec);
    }

    _arb_poly_set_length(res, n);
    _arb_poly_normalise(res);
}
/* Computes the Keiper-Li coefficients lambda_0, ..., lambda_{len-1} to
   precision prec, writing them to z, while printing timings and memory
   usage for the dominant steps.

   Method: build the Taylor series at s = 0 of
       t(s) = log(-zeta(s)) + log(gamma(1+s/2)) - (s/2) log(pi) + log(1-s)
            = log xi(s),
   where xi is the Riemann xi function, then obtain lambda_n via a
   binomial transform of the coefficients (presumably implementing the
   substitution s -> s/(s-1) in log xi -- TODO confirm against the
   definition used by _arb_poly_binomial_transform).

   NOTE(review): t + 1 and u + 1 are written unconditionally, so this
   assumes len >= 2 -- callers must ensure this. */
void keiper_li_series(arb_ptr z, slong len, slong prec)
{
    /* t, u, v are scratch vectors reused across the phases below;
       the exact reuse order matters. */
    arb_ptr t, u, v;

    t = _arb_vec_init(len);
    u = _arb_vec_init(len);
    v = _arb_vec_init(len);

    /* v = -zeta(s) as a series at s = 0: evaluate Hurwitz zeta with
       argument series t = 0 + s and parameter u = 1, then negate. */
    flint_printf("zeta: ");
    TIMEIT_ONCE_START
    arb_zero(t + 0);
    arb_one(t + 1);
    arb_one(u);
    _arb_poly_zeta_series(v, t, 2, u, 0, len, prec);
    _arb_vec_neg(v, v, len);
    TIMEIT_ONCE_STOP
    SHOW_MEMORY_USAGE

    /* t = log(-zeta(s));  -zeta(0) = 1/2 > 0, so the log series exists */
    flint_printf("log: ");
    TIMEIT_ONCE_START
    _arb_poly_log_series(t, v, len, len, prec);
    TIMEIT_ONCE_STOP

    /* t += log(gamma(1+s/2)): lgamma of the length-2 series u = 1 + s/2 */
    flint_printf("gamma: ");
    TIMEIT_ONCE_START
    arb_one(u);
    arb_one(u + 1);
    arb_mul_2exp_si(u + 1, u + 1, -1);
    _arb_poly_lgamma_series(v, u, 2, len, prec);
    _arb_vec_add(t, t, v, len, prec);
    TIMEIT_ONCE_STOP

    /* t -= (s/2) log(pi): only the linear coefficient changes */
    arb_const_pi(u, prec);
    arb_log(u, u, prec);
    arb_mul_2exp_si(u, u, -1);
    arb_sub(t + 1, t + 1, u, prec);

    /* t += log(1-s) */
    arb_one(u);
    arb_set_si(u + 1, -1);
    _arb_poly_log_series(v, u, 2, len, prec);
    _arb_vec_add(t, t, v, len, prec);

    /* lambda_0 = t_0; remaining coefficients via binomial transform of
       the sign-alternated tail (signs flipped in place by the neg) */
    flint_printf("binomial transform: ");
    TIMEIT_ONCE_START
    arb_set(z, t);
    _arb_vec_neg(t + 1, t + 1, len - 1);
    _arb_poly_binomial_transform(z + 1, t + 1, len - 1, len - 1, prec);
    TIMEIT_ONCE_STOP

    _arb_vec_clear(t, len);
    _arb_vec_clear(u, len);
    _arb_vec_clear(v, len);
}
/* Sets res to the power series log(gamma(h(x))) truncated to length len,
   where h is a complex power series of length hlen, computed at precision
   prec.

   Strategy, in order:
     - constant input (hlen == 1): point evaluation, pad with zeros
       (or indeterminate values if lgamma was not finite);
     - len == 2: first-order expansion via lgamma and digamma;
     - purely real input with positive constant term: delegate to the
       real (_arb_poly) implementation;
     - otherwise: Stirling series, with the reflection formula when the
       parameter-selection routine requests it.

   res must not alias h beyond what the final compose call permits --
   NOTE(review): intermediate work happens in t/u, and res is only
   written by the early-return branches and the final
   _acb_poly_compose_series, which presumably supports aliasing;
   confirm against the callers' conventions. */
void _acb_poly_lgamma_series(acb_ptr res, acb_srcptr h, slong hlen, slong len, slong prec)
{
    int reflect;
    slong i, r, n, wp;
    acb_t zr;
    acb_ptr t, u;

    hlen = FLINT_MIN(hlen, len);

    /* constant series: log gamma(h0), higher coefficients zero */
    if (hlen == 1)
    {
        acb_lgamma(res, h, prec);
        if (acb_is_finite(res))
            _acb_vec_zero(res + 1, len - 1);
        else
            _acb_vec_indeterminate(res + 1, len - 1);
        return;
    }

    /* two terms: log gamma(h0) + h1 * psi(h0) * x
       (h + 1 is copied to v first so res may alias h) */
    if (len == 2)
    {
        acb_t v;
        acb_init(v);
        acb_set(v, h + 1);
        acb_digamma(res + 1, h, prec);
        acb_lgamma(res, h, prec);
        acb_mul(res + 1, res + 1, v, prec);
        acb_clear(v);
        return;
    }

    /* use real code for real input and output */
    if (_acb_vec_is_real(h, hlen) && arb_is_positive(acb_realref(h)))
    {
        arb_ptr tmp = _arb_vec_init(len);
        for (i = 0; i < hlen; i++)
            arb_set(tmp + i, acb_realref(h + i));
        _arb_poly_lgamma_series(tmp, tmp, hlen, len, prec);
        for (i = 0; i < len; i++)
            acb_set_arb(res + i, tmp + i);
        _arb_vec_clear(tmp, len);
        return;
    }

    /* working precision with guard bits */
    wp = prec + FLINT_BIT_COUNT(prec);

    t = _acb_vec_init(len);
    u = _acb_vec_init(len);
    acb_init(zr);

    /* use Stirling series; chooses reflection flag, argument shift r,
       and number of terms n */
    acb_gamma_stirling_choose_param(&reflect, &r, &n, h, 1, 0, wp);

    if (reflect)
    {
        /* log gamma(h+x) = log rf(1-(h+x), r) - log gamma(1-(h+x)+r)
                            - log sin(pi (h+x)) + log(pi) */

        /* t = log rf(1-(h+x), r); the odd-index sign flips implement
           the substitution x -> -x in the series variable */
        if (r != 0) /* otherwise t = 0 */
        {
            acb_sub_ui(u, h, 1, wp);
            acb_neg(u, u);
            _log_rising_ui_series(t, u, r, len, wp);
            for (i = 1; i < len; i += 2)
                acb_neg(t + i, t + i);
        }

        /* t -= log gamma(1-(h+x)+r), again with x -> -x sign flips */
        acb_sub_ui(u, h, 1, wp);
        acb_neg(u, u);
        acb_add_ui(zr, u, r, wp);
        _acb_poly_gamma_stirling_eval(u, zr, n, len, wp);
        for (i = 1; i < len; i += 2)
            acb_neg(u + i, u + i);
        _acb_vec_sub(t, t, u, len, wp);

        /* log(sin) is unstable with large imaginary parts;
           cot_pi is implemented in a numerically stable way:
           build log sin(pi(h+x)) as log sin(pi h) plus the integral of
           pi*cot(pi(h+x)).  Note the in-place layering of u: u[1..] gets
           the integrated tail scaled by pi (temporarily stored in u[0]),
           and only then is u[0] overwritten with the constant term. */
        acb_set(u, h);
        acb_one(u + 1);
        _acb_poly_cot_pi_series(u, u, 2, len - 1, wp);
        _acb_poly_integral(u, u, len, wp);
        acb_const_pi(u, wp);
        _acb_vec_scalar_mul(u + 1, u + 1, len - 1, u, wp);
        acb_log_sin_pi(u, h, wp);

        _acb_vec_sub(u, t, u, len, wp);

        /* + log(pi) */
        acb_const_pi(t, wp); /* todo: constant for log pi */
        acb_log(t, t, wp);
        acb_add(u, u, t, wp);
    }
    else
    {
        /* log gamma(x) = log gamma(x+r) - log rf(x,r) */
        acb_add_ui(zr, h, r, wp);
        _acb_poly_gamma_stirling_eval(u, zr, n, len, wp);
        if (r != 0)
        {
            _log_rising_ui_series(t, h, r, len, wp);
            _acb_vec_sub(u, u, t, len, wp);
        }
    }

    /* compose with nonconstant part: the series u was computed in the
       shifted variable, so substitute t = h(x) - h0 */
    acb_zero(t);
    _acb_vec_set(t + 1, h + 1, hlen - 1);
    _acb_poly_compose_series(res, u, len, t, hlen, len, prec);

    acb_clear(zr);
    _acb_vec_clear(t, len);
    _acb_vec_clear(u, len);
}
/* Computes res = sum of T(k) = k^n / k! over the range a <= k < b
   (partial sum of the Dobinski series for the Bell number B_n --
   TODO confirm the endpoint convention against arb_bell_sum_bsplit),
   accurate to about tol bits.

   Method: for short ranges, direct binary splitting.  Otherwise pick the
   midpoint m, expand exp(F(m+x)) with F(x) = n log(x) - log gamma(x+1)
   as a Taylor series of length N around m, and evaluate the sum of the
   series over x in [a-m, b-m) using power sums.  A rigorous tail bound
   for the truncated series is derived from mag-type estimates of F'(m)
   and the second-derivative terms; if that bound (C) is too large, the
   interval is bisected recursively instead.

   mmag, if non-NULL, is an estimate of the magnitude (base-2 exponent)
   of the final result, used to pick N and to decide convergence;
   if NULL, relative accuracy of res itself is used. */
void _arb_bell_sum_taylor(arb_t res, const fmpz_t n, const fmpz_t a, const fmpz_t b, const fmpz_t mmag, long tol)
{
    fmpz_t m, r, R, tmp;
    mag_t B, C, D, bound;
    arb_t t, u;
    long wp, k, N;

    /* short range: binary splitting is cheaper and always valid */
    if (_fmpz_sub_small(b, a) < 5)
    {
        arb_bell_sum_bsplit(res, n, a, b, mmag, tol);
        return;
    }

    fmpz_init(m);
    fmpz_init(r);
    fmpz_init(R);
    fmpz_init(tmp);

    /* r = max(m - a, b - m) */
    /* m = a + (b - a) / 2 */
    fmpz_sub(r, b, a);
    fmpz_cdiv_q_2exp(r, r, 1);
    fmpz_add(m, a, r);

    /* R = r * 2^RADIUS_BITS: the (larger) disk radius used for bounds,
       giving a geometric tail ratio of 2^-RADIUS_BITS per term */
    fmpz_mul_2exp(R, r, RADIUS_BITS);

    mag_init(B);
    mag_init(C);
    mag_init(D);
    mag_init(bound);

    arb_init(t);
    arb_init(u);

    if (fmpz_cmp(R, m) >= 0)
    {
        /* disk reaches 0 where F blows up: no usable bound */
        mag_inf(C);
        mag_inf(D);
    }
    else
    {
        /* C = exp(R * |F'(m)| + (1/2) R^2 * (n/(m-R)^2 + 1/(m-R))) */
        /* C = exp(R * (|F'(m)| + (1/2) R * (n/(m-R) + 1)/(m-R))) */
        /* D = (1/2) R * (n/(m-R) + 1)/(m-R) */
        fmpz_sub(tmp, m, R);
        mag_set_fmpz(D, n);
        mag_div_fmpz(D, D, tmp);
        mag_one(C);
        mag_add(D, D, C);
        mag_div_fmpz(D, D, tmp);
        mag_mul_fmpz(D, D, R);
        mag_mul_2exp_si(D, D, -1);

        /* C = |F'(m)| where F'(x) = n/x - psi(x+1) */
        wp = 20 + 1.05 * fmpz_bits(n);
        arb_set_fmpz(t, n);
        arb_div_fmpz(t, t, m, wp);
        fmpz_add_ui(tmp, m, 1);
        arb_set_fmpz(u, tmp);
        arb_digamma(u, u, wp);
        arb_sub(t, t, u, wp);
        arb_get_mag(C, t);

        /* C = exp(R * (C + D)) */
        mag_add(C, C, D);
        mag_mul_fmpz(C, C, R);
        mag_exp(C, C);
    }

    if (mag_cmp_2exp_si(C, tol / 4 + 2) > 0)
    {
        /* growth bound too large for one Taylor expansion: bisect */
        _arb_bell_sum_taylor(res, n, a, m, mmag, tol);
        _arb_bell_sum_taylor(t, n, m, b, mmag, tol);
        arb_add(res, res, t, 2 * tol);
    }
    else
    {
        arb_ptr mx, ser1, ser2, ser3;

        /* D = T(m) = m^n / gamma(m+1), the peak term magnitude */
        wp = 20 + 1.05 * fmpz_bits(n);
        arb_set_fmpz(t, m);
        arb_pow_fmpz(t, t, n, wp);
        fmpz_add_ui(tmp, m, 1);
        arb_gamma_fmpz(u, tmp, wp);
        arb_div(t, t, u, wp);
        arb_get_mag(D, t);

        /* error bound: (b-a) * C * D * B^N / (1 - B), B = r/R */
        /* ((b-a) * C * D * 2) * 2^(-N*RADIUS_BITS) */
        /* ((b-a) * C * D * 2) */
        mag_mul(bound, C, D);
        mag_mul_2exp_si(bound, bound, 1);
        fmpz_sub(tmp, b, a);
        mag_mul_fmpz(bound, bound, tmp);

        /* N = (tol + log2((b-a)*C*D*2) - mmag) / RADIUS_BITS */
        if (mmag == NULL)
        {
            /* estimate D ~= 2^mmag */
            fmpz_add_ui(tmp, MAG_EXPREF(C), tol);
            fmpz_cdiv_q_ui(tmp, tmp, RADIUS_BITS);
        }
        else
        {
            fmpz_sub(tmp, MAG_EXPREF(bound), mmag);
            fmpz_add_ui(tmp, tmp, tol);
            fmpz_cdiv_q_ui(tmp, tmp, RADIUS_BITS);
        }

        /* clamp N to [2, 5*tol/4] */
        if (fmpz_cmp_ui(tmp, 5 * tol / 4) > 0)
            N = 5 * tol / 4;
        else if (fmpz_cmp_ui(tmp, 2) < 0)
            N = 2;
        else
            N = fmpz_get_ui(tmp);

        /* multiply by 2^(-N*RADIUS_BITS) to get the final tail bound */
        mag_mul_2exp_si(bound, bound, -N * RADIUS_BITS);

        mx = _arb_vec_init(2);
        ser1 = _arb_vec_init(N);
        ser2 = _arb_vec_init(N);
        ser3 = _arb_vec_init(N);

        /* estimate (this should work for moderate n and tol) */
        wp = 1.1 * tol + 1.05 * fmpz_bits(n) + 5;

        /* increase precision until convergence */
        while (1)
        {
            /* ser3 = Taylor series of (m+x)^n / gamma(m+1+x)
                    = exp(n log(m+x) - lgamma(m+1+x)) */
            arb_set_fmpz(mx, m);
            arb_one(mx + 1);

            _arb_poly_log_series(ser1, mx, 2, N, wp);
            for (k = 0; k < N; k++)
                arb_mul_fmpz(ser1 + k, ser1 + k, n, wp);
            arb_add_ui(mx, mx, 1, wp);
            _arb_poly_lgamma_series(ser2, mx, 2, N, wp);
            _arb_vec_sub(ser1, ser1, ser2, N, wp);
            _arb_poly_exp_series(ser3, ser1, N, N, wp);

            /* t = a - m, u = b - m */
            arb_set_fmpz(t, a);
            arb_sub_fmpz(t, t, m, wp);
            arb_set_fmpz(u, b);
            arb_sub_fmpz(u, u, m, wp);

            /* ser1[j] = sum of x^j over integer x in the shifted range */
            arb_power_sum_vec(ser1, t, u, N, wp);

            /* res = sum_j ser3[j] * (power sum j) */
            arb_zero(res);
            for (k = 0; k < N; k++)
                arb_addmul(res, ser3 + k, ser1 + k, wp);

            /* converged when the radius is tol bits below the target
               magnitude (or relative accuracy reaches tol) */
            if (mmag != NULL)
            {
                if (_fmpz_sub_small(MAG_EXPREF(arb_radref(res)), mmag) <= -tol)
                    break;
            }
            else
            {
                if (arb_rel_accuracy_bits(res) >= tol)
                    break;
            }

            wp = 2 * wp;
        }

        /* add the series truncation bound */
        arb_add_error_mag(res, bound);

        _arb_vec_clear(mx, 2);
        _arb_vec_clear(ser1, N);
        _arb_vec_clear(ser2, N);
        _arb_vec_clear(ser3, N);
    }

    mag_clear(B);
    mag_clear(C);
    mag_clear(D);
    mag_clear(bound);
    arb_clear(t);
    arb_clear(u);
    fmpz_clear(m);
    fmpz_clear(r);
    fmpz_clear(R);
    fmpz_clear(tmp);
}