int
fmprb_mat_lu(long * P, fmprb_mat_t LU, const fmprb_mat_t A, long prec)
{
    fmprb_t d, e;
    fmprb_ptr * a;
    long i, j, m, n, r, row, col;
    int result;

    m = fmprb_mat_nrows(A);
    n = fmprb_mat_ncols(A);

    result = 1;

    if (m == 0 || n == 0)
        return result;

    fmprb_mat_set(LU, A);

    a = LU->rows;

    row = col = 0;
    for (i = 0; i < m; i++)
        P[i] = i;

    fmprb_init(d);
    fmprb_init(e);

    while (row < m && col < n)
    {
        r = fmprb_mat_find_pivot_partial(LU, row, m, col);

        if (r == -1)
        {
            result = 0;
            break;
        }
        else if (r != row)
            fmprb_mat_swap_rows(LU, P, row, r);

        fmprb_set(d, a[row] + col);

        for (j = row + 1; j < m; j++)
        {
            fmprb_div(e, a[j] + col, d, prec);
            fmprb_neg(e, e);
            _fmprb_vec_scalar_addmul(a[j] + col, a[row] + col, n - col, e, prec);
            fmprb_zero(a[j] + col);
            fmprb_neg(a[j] + row, e);
        }

        row++;
        col++;
    }

    fmprb_clear(d);
    fmprb_clear(e);

    return result;
}
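/* Usage sketch (not part of the library source): LU-factor a small exact
   matrix. The example name is hypothetical, and fmprb_set_si, fmprb_mat_entry
   and fmprb_mat_printd are assumed from the surrounding fmprb/fmprb_mat API. */
#include <stdio.h>
#include "fmprb_mat.h"

static void
example_fmprb_mat_lu(void)
{
    fmprb_mat_t A, LU;
    long P[2], prec = 53;

    fmprb_mat_init(A, 2, 2);
    fmprb_mat_init(LU, 2, 2);

    fmprb_set_si(fmprb_mat_entry(A, 0, 0), 2);
    fmprb_set_si(fmprb_mat_entry(A, 0, 1), 1);
    fmprb_set_si(fmprb_mat_entry(A, 1, 0), 1);
    fmprb_set_si(fmprb_mat_entry(A, 1, 1), 3);

    /* returns 0 if no certified nonzero pivot is found in some column */
    if (fmprb_mat_lu(P, LU, A, prec))
        fmprb_mat_printd(LU, 10);
    else
        printf("matrix could not be certified invertible\n");

    fmprb_mat_clear(A);
    fmprb_mat_clear(LU);
}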
void
_fmprb_poly_product_roots(fmprb_ptr poly, fmprb_srcptr xs, long n, long prec)
{
    if (n == 0)
    {
        fmprb_one(poly);
    }
    else if (n == 1)
    {
        fmprb_neg(poly, xs);
        fmprb_one(poly + 1);
    }
    else if (n == 2)
    {
        fmprb_mul(poly, xs + 0, xs + 1, prec);
        fmprb_add(poly + 1, xs + 0, xs + 1, prec);
        fmprb_neg(poly + 1, poly + 1);
        fmprb_one(poly + 2);
    }
    else
    {
        const long m = (n + 1) / 2;
        fmprb_ptr tmp;

        tmp = _fmprb_vec_init(n + 2);

        _fmprb_poly_product_roots(tmp, xs, m, prec);
        _fmprb_poly_product_roots(tmp + m + 1, xs + m, n - m, prec);
        _fmprb_poly_mul_monic(poly, tmp, m + 1, tmp + m + 1, n - m + 1, prec);

        _fmprb_vec_clear(tmp, n + 2);
    }
}
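/* Usage sketch (not part of the library source): build (x-1)(x-2)(x-3) from
   its roots. The example name is hypothetical; fmprb_set_si and fmprb_printd
   are assumed from the surrounding fmprb API. */
#include <stdio.h>
#include "fmprb_poly.h"

static void
example_product_roots(void)
{
    long n = 3, prec = 64, i;
    fmprb_ptr xs, poly;

    xs = _fmprb_vec_init(n);
    poly = _fmprb_vec_init(n + 1);

    for (i = 0; i < n; i++)
        fmprb_set_si(xs + i, i + 1);   /* roots 1, 2, 3 */

    _fmprb_poly_product_roots(poly, xs, n, prec);

    /* expected coefficients: -6, 11, -6, 1, i.e. x^3 - 6x^2 + 11x - 6 */
    for (i = 0; i <= n; i++)
    {
        fmprb_printd(poly + i, 10);
        printf("\n");
    }

    _fmprb_vec_clear(xs, n);
    _fmprb_vec_clear(poly, n + 1);
}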
void
fmprb_tanh(fmprb_t y, const fmprb_t x, long prec)
{
    fmprb_t t, u;

    fmprb_init(t);
    fmprb_init(u);

    fmprb_mul_2exp_si(t, x, 1);

    if (fmpr_sgn(fmprb_midref(x)) >= 0)
    {
        /* tanh(x) = -expm1(-2x) / (expm1(-2x) + 2); negating the argument
           avoids overflow in expm1 for large positive x */
        fmprb_neg(t, t);
        fmprb_expm1(t, t, prec + 4);
        fmprb_add_ui(u, t, 2, prec + 4);
        fmprb_div(y, t, u, prec);
        fmprb_neg(y, y);
    }
    else
    {
        /* tanh(x) = expm1(2x) / (expm1(2x) + 2) */
        fmprb_expm1(t, t, prec + 4);
        fmprb_add_ui(u, t, 2, prec + 4);
        fmprb_div(y, t, u, prec);
    }

    fmprb_clear(t);
    fmprb_clear(u);
}
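/* Usage sketch (not part of the library source): evaluate tanh at x = 1.
   The example name is hypothetical; fmprb_printd is assumed from the
   surrounding fmprb API. */
#include <stdio.h>
#include "fmprb.h"

static void
example_tanh(void)
{
    fmprb_t x, y;

    fmprb_init(x);
    fmprb_init(y);

    fmprb_one(x);                 /* x = 1 */
    fmprb_tanh(y, x, 53);         /* tanh(1) = 0.76159415... */
    fmprb_printd(y, 15);
    printf("\n");

    fmprb_clear(x);
    fmprb_clear(y);
}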
void
fmpcb_cos_pi(fmpcb_t r, const fmpcb_t z, long prec)
{
#define a fmpcb_realref(z)
#define b fmpcb_imagref(z)

    fmprb_t sa, ca, sb, cb;

    fmprb_init(sa);
    fmprb_init(ca);
    fmprb_init(sb);
    fmprb_init(cb);

    fmprb_sin_cos_pi(sa, ca, a, prec);

    fmprb_const_pi(cb, prec);
    fmprb_mul(cb, cb, b, prec);
    fmprb_sinh_cosh(sb, cb, cb, prec);

    fmprb_mul(fmpcb_realref(r), ca, cb, prec);
    fmprb_mul(fmpcb_imagref(r), sa, sb, prec);
    fmprb_neg(fmpcb_imagref(r), fmpcb_imagref(r));

    fmprb_clear(sa);
    fmprb_clear(ca);
    fmprb_clear(sb);
    fmprb_clear(cb);

#undef a
#undef b
}
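/* Usage sketch (not part of the library source): the routine above implements
   cos(pi*(a+bi)) = cos(pi a) cosh(pi b) - i sin(pi a) sinh(pi b). The example
   name is hypothetical; fmpcb_printd and fmprb_mul_2exp_si are assumed from
   the surrounding fmpcb/fmprb API. */
#include <stdio.h>
#include "fmpcb.h"

static void
example_cos_pi(void)
{
    fmpcb_t z, r;

    fmpcb_init(z);
    fmpcb_init(r);

    /* z = 1/2 + i */
    fmprb_one(fmpcb_realref(z));
    fmprb_mul_2exp_si(fmpcb_realref(z), fmpcb_realref(z), -1);
    fmprb_one(fmpcb_imagref(z));

    fmpcb_cos_pi(r, z, 53);       /* cos(pi z) = -i sinh(pi) = -11.5487... i */
    fmpcb_printd(r, 15);
    printf("\n");

    fmpcb_clear(z);
    fmpcb_clear(r);
}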
void
fmprb_hypgeom_infsum(fmprb_t P, fmprb_t Q, hypgeom_t hyp, long target_prec, long prec)
{
    mag_t err, z;
    long n;

    mag_init(err);
    mag_init(z);

    mag_set_fmpz(z, hyp->P->coeffs + hyp->P->length - 1);
    mag_div_fmpz(z, z, hyp->Q->coeffs + hyp->Q->length - 1);

    if (!hyp->have_precomputed)
    {
        hypgeom_precompute(hyp);
        hyp->have_precomputed = 1;
    }

    n = hypgeom_bound(err, hyp->r, hyp->boundC, hyp->boundD,
        hyp->boundK, hyp->MK, z, target_prec);

    fmprb_hypgeom_sum(P, Q, hyp, n, prec);

    if (fmpr_sgn(fmprb_midref(Q)) < 0)
    {
        fmprb_neg(P, P);
        fmprb_neg(Q, Q);
    }

    /* We have p/q = s + err, i.e. (p + q*err)/q = s */
    {
        fmpr_t u, v;

        fmpr_init(u);
        fmpr_init(v);

        mag_get_fmpr(v, err);

        fmpr_add(u, fmprb_midref(Q), fmprb_radref(Q), FMPRB_RAD_PREC, FMPR_RND_UP);
        fmpr_mul(u, u, v, FMPRB_RAD_PREC, FMPR_RND_UP);

        fmprb_add_error_fmpr(P, u);

        fmpr_clear(u);
        fmpr_clear(v);
    }

    mag_clear(z);
    mag_clear(err);
}
long
fmprb_mat_gauss_partial(fmprb_mat_t A, long prec)
{
    fmprb_t e;
    fmprb_ptr * a;
    long j, m, n, r, rank, row, col, sign;

    m = A->r;
    n = A->c;
    a = A->rows;
    rank = row = col = 0;
    sign = 1;

    fmprb_init(e);

    while (row < m && col < n)
    {
        r = fmprb_mat_find_pivot_partial(A, row, m, col);

        if (r == -1)
        {
            break;
        }
        else if (r != row)
        {
            fmprb_mat_swap_rows(A, NULL, row, r);
            sign *= -1;
        }

        rank++;

        for (j = row + 1; j < m; j++)
        {
            fmprb_div(e, a[j] + col, a[row] + col, prec);
            fmprb_neg(e, e);
            _fmprb_vec_scalar_addmul(a[j] + col + 1, a[row] + col + 1,
                n - col - 1, e, prec);
        }

        row++;
        col++;
    }

    fmprb_clear(e);

    return rank * sign;
}
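/* Usage sketch (not part of the library source): fmprb_mat_gauss_partial
   reduces A in place and returns (number of certified pivots) * (row-swap
   sign), which is the quantity a determinant routine builds on. The sketch
   below calls fmprb_mat_det, assumed to be the public entry point of the
   same fmprb_mat module; the example name is hypothetical. */
#include <stdio.h>
#include "fmprb_mat.h"

static void
example_det(void)
{
    fmprb_mat_t A;
    fmprb_t det;
    long prec = 53;

    fmprb_mat_init(A, 2, 2);
    fmprb_init(det);

    fmprb_set_si(fmprb_mat_entry(A, 0, 0), 0);
    fmprb_set_si(fmprb_mat_entry(A, 0, 1), 1);
    fmprb_set_si(fmprb_mat_entry(A, 1, 0), 1);
    fmprb_set_si(fmprb_mat_entry(A, 1, 1), 4);

    fmprb_mat_det(det, A, prec);  /* det = 0*4 - 1*1 = -1 */
    fmprb_printd(det, 10);
    printf("\n");

    fmprb_mat_clear(A);
    fmprb_clear(det);
}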
void
_fmprb_poly_riemann_siegel_theta_series(fmprb_ptr res, fmprb_srcptr h,
    long hlen, long len, long prec)
{
    fmpcb_ptr s;
    fmprb_t u;
    long i;

    hlen = FLINT_MIN(hlen, len);

    s = _fmpcb_vec_init(len);
    fmprb_init(u);

    /* s = 1/4 + (1/2) i h */
    for (i = 0; i < hlen; i++)
        fmprb_mul_2exp_si(fmpcb_imagref(s + i), h + i, -1);

    fmprb_one(u);
    fmprb_mul_2exp_si(u, u, -2);
    fmprb_add(fmpcb_realref(s), fmpcb_realref(s), u, prec);

    /* log gamma */
    _fmpcb_poly_lgamma_series(s, s, hlen, len, prec);

    /* imaginary part */
    for (i = 0; i < len; i++)
        fmprb_set(res + i, fmpcb_imagref(s + i));

    /* subtract log(pi)/2 * h */
    fmprb_const_pi(u, prec);
    fmprb_log(u, u, prec);
    fmprb_mul_2exp_si(u, u, -1);
    fmprb_neg(u, u);
    _fmprb_vec_scalar_addmul(res, h, hlen, u, prec);

    _fmpcb_vec_clear(s, len);
    fmprb_clear(u);
}
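/* Usage sketch (not part of the library source): with h = [t0, 1] the output
   coefficients are theta(t0), theta'(t0), ..., where theta is the
   Riemann-Siegel theta function Im lgamma(1/4 + i t/2) - (t/2) log pi.
   The example name is hypothetical; fmprb_printd is assumed from the
   surrounding fmprb API. */
#include <stdio.h>
#include "fmprb_poly.h"

static void
example_theta_series(void)
{
    long len = 2, prec = 64;
    fmprb_ptr h, res;

    h = _fmprb_vec_init(len);
    res = _fmprb_vec_init(len);

    fmprb_set_ui(h, 100);    /* expansion point t0 = 100 */
    fmprb_one(h + 1);        /* series variable */

    _fmprb_poly_riemann_siegel_theta_series(res, h, len, len, prec);

    fmprb_printd(res, 15);     /* theta(100) */
    printf("\n");
    fmprb_printd(res + 1, 15); /* theta'(100) */
    printf("\n");

    _fmprb_vec_clear(h, len);
    _fmprb_vec_clear(res, len);
}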
void
gamma_stirling_eval_fmprb(fmprb_t s, const fmprb_t z, long nterms, int digamma, long prec)
{
    fmprb_t b, t, logz, zinv, zinv2;
    fmpr_t err;
    long k, term_prec;
    double z_mag, term_mag;

    fmprb_init(b);
    fmprb_init(t);
    fmprb_init(logz);
    fmprb_init(zinv);
    fmprb_init(zinv2);

    fmprb_log(logz, z, prec);
    fmprb_ui_div(zinv, 1UL, z, prec);

    nterms = FLINT_MAX(nterms, 1);

    fmprb_zero(s);

    if (nterms > 1)
    {
        fmprb_mul(zinv2, zinv, zinv, prec);

        z_mag = fmpr_get_d(fmprb_midref(logz), FMPR_RND_UP) * 1.44269504088896;

        for (k = nterms - 1; k >= 1; k--)
        {
            term_mag = bernoulli_bound_2exp_si(2 * k);
            term_mag -= (2 * k - 1) * z_mag;
            term_prec = prec + term_mag;
            term_prec = FLINT_MIN(term_prec, prec);
            term_prec = FLINT_MAX(term_prec, 10);

            if (prec > 2000)
            {
                fmprb_set_round(t, zinv2, term_prec);
                fmprb_mul(s, s, t, term_prec);
            }
            else
                fmprb_mul(s, s, zinv2, term_prec);

            gamma_stirling_coeff(b, k, digamma, term_prec);
            fmprb_add(s, s, b, term_prec);
        }

        if (digamma)
            fmprb_mul(s, s, zinv2, prec);
        else
            fmprb_mul(s, s, zinv, prec);
    }

    /* remainder bound */
    fmpr_init(err);
    gamma_stirling_bound_fmprb(err, z, digamma ? 1 : 0, 1, nterms);
    fmprb_add_error_fmpr(s, err);
    fmpr_clear(err);

    if (digamma)
    {
        fmprb_neg(s, s);
        fmprb_mul_2exp_si(zinv, zinv, -1);
        fmprb_sub(s, s, zinv, prec);
        fmprb_add(s, s, logz, prec);
    }
    else
    {
        /* (z - 0.5)*log(z) - z + log(2*pi)/2 */
        fmprb_one(t);
        fmprb_mul_2exp_si(t, t, -1);
        fmprb_sub(t, z, t, prec);
        fmprb_mul(t, logz, t, prec);
        fmprb_add(s, s, t, prec);
        fmprb_sub(s, s, z, prec);
        fmprb_const_log_sqrt2pi(t, prec);
        fmprb_add(s, s, t, prec);
    }

    fmprb_clear(t);
    fmprb_clear(b);
    fmprb_clear(zinv);
    fmprb_clear(zinv2);
    fmprb_clear(logz);
}
void
_fmprb_poly_zeta_series(fmprb_ptr res, fmprb_srcptr h, long hlen,
    const fmprb_t a, int deflate, long len, long prec)
{
    long i;
    fmpcb_t cs, ca;
    fmpcb_ptr z;
    fmprb_ptr t, u;

    if (fmprb_contains_nonpositive(a))
    {
        _fmprb_vec_indeterminate(res, len);
        return;
    }

    hlen = FLINT_MIN(hlen, len);

    z = _fmpcb_vec_init(len);
    t = _fmprb_vec_init(len);
    u = _fmprb_vec_init(len);
    fmpcb_init(cs);
    fmpcb_init(ca);

    /* use reflection formula */
    if (fmpr_sgn(fmprb_midref(h)) < 0 && fmprb_is_one(a))
    {
        /* zeta(s) = (2*pi)**s * sin(pi*s/2) / pi * gamma(1-s) * zeta(1-s) */
        fmprb_t pi;
        fmprb_ptr f, s1, s2, s3, s4;

        fmprb_init(pi);
        f = _fmprb_vec_init(2);
        s1 = _fmprb_vec_init(len);
        s2 = _fmprb_vec_init(len);
        s3 = _fmprb_vec_init(len);
        s4 = _fmprb_vec_init(len);

        fmprb_const_pi(pi, prec);

        /* s1 = (2*pi)**s */
        fmprb_mul_2exp_si(pi, pi, 1);
        _fmprb_poly_pow_cpx(s1, pi, h, len, prec);
        fmprb_mul_2exp_si(pi, pi, -1);

        /* s2 = sin(pi*s/2) / pi */
        fmprb_mul_2exp_si(pi, pi, -1);
        fmprb_mul(f, pi, h, prec);
        fmprb_set(f + 1, pi);
        fmprb_mul_2exp_si(pi, pi, 1);
        _fmprb_poly_sin_series(s2, f, 2, len, prec);
        _fmprb_vec_scalar_div(s2, s2, len, pi, prec);

        /* s3 = gamma(1-s) */
        fmprb_sub_ui(f, h, 1, prec);
        fmprb_neg(f, f);
        fmprb_set_si(f + 1, -1);
        _fmprb_poly_gamma_series(s3, f, 2, len, prec);

        /* s4 = zeta(1-s) */
        fmprb_sub_ui(f, h, 1, prec);
        fmprb_neg(f, f);
        fmpcb_set_fmprb(cs, f);
        fmpcb_one(ca);
        zeta_series(z, cs, ca, 0, len, prec);
        for (i = 0; i < len; i++)
            fmprb_set(s4 + i, fmpcb_realref(z + i));
        for (i = 1; i < len; i += 2)
            fmprb_neg(s4 + i, s4 + i);

        _fmprb_poly_mullow(u, s1, len, s2, len, len, prec);
        _fmprb_poly_mullow(s1, s3, len, s4, len, len, prec);
        _fmprb_poly_mullow(t, u, len, s1, len, len, prec);

        /* add 1/(1-(s+t)) = 1/(1-s) + t/(1-s)^2 + ... */
        if (deflate)
        {
            fmprb_sub_ui(u, h, 1, prec);
            fmprb_neg(u, u);
            fmprb_ui_div(u, 1, u, prec);
            for (i = 1; i < len; i++)
                fmprb_mul(u + i, u + i - 1, u, prec);
            _fmprb_vec_add(t, t, u, len, prec);
        }

        fmprb_clear(pi);
        _fmprb_vec_clear(f, 2);
        _fmprb_vec_clear(s1, len);
        _fmprb_vec_clear(s2, len);
        _fmprb_vec_clear(s3, len);
        _fmprb_vec_clear(s4, len);
    }
    else
    {
        fmpcb_set_fmprb(cs, h);
        fmpcb_set_fmprb(ca, a);
        zeta_series(z, cs, ca, deflate, len, prec);
        for (i = 0; i < len; i++)
            fmprb_set(t + i, fmpcb_realref(z + i));
    }

    /* compose with nonconstant part */
    fmprb_zero(u);
    _fmprb_vec_set(u + 1, h + 1, hlen - 1);
    _fmprb_poly_compose_series(res, t, len, u, hlen, len, prec);

    _fmpcb_vec_clear(z, len);
    _fmprb_vec_clear(t, len);
    _fmprb_vec_clear(u, len);
    fmpcb_clear(cs);
    fmpcb_clear(ca);
}
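/* Usage sketch (not part of the library source): expand zeta(s) to second
   order around s = 2 with Hurwitz parameter a = 1 and no deflation. The
   expected constant term is zeta(2) = pi^2/6. The example name is
   hypothetical; fmprb_printd is assumed from the surrounding fmprb API. */
#include <stdio.h>
#include "fmprb_poly.h"

static void
example_zeta_series(void)
{
    long len = 3, prec = 64, i;
    fmprb_ptr h, res;
    fmprb_t a;

    h = _fmprb_vec_init(2);
    res = _fmprb_vec_init(len);
    fmprb_init(a);

    fmprb_set_ui(h, 2);      /* expansion point s = 2 */
    fmprb_one(h + 1);        /* series variable */
    fmprb_one(a);            /* a = 1 gives the Riemann zeta function */

    _fmprb_poly_zeta_series(res, h, 2, a, 0, len, prec);

    /* res[k] = zeta^(k)(2) / k! */
    for (i = 0; i < len; i++)
    {
        fmprb_printd(res + i, 15);
        printf("\n");
    }

    fmprb_clear(a);
    _fmprb_vec_clear(h, 2);
    _fmprb_vec_clear(res, len);
}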
void
_fmprb_poly_evaluate_vec_fast_precomp(fmprb_ptr vs, fmprb_srcptr poly,
    long plen, fmprb_ptr * tree, long len, long prec)
{
    long height, i, j, pow, left;
    long tree_height;
    long tlen;
    fmprb_ptr t, u, swap, pa, pb, pc;

    /* avoid worrying about some degenerate cases */
    if (len < 2 || plen < 2)
    {
        if (len == 1)
        {
            fmprb_t tmp;
            fmprb_init(tmp);
            fmprb_neg(tmp, tree[0] + 0);
            _fmprb_poly_evaluate(vs + 0, poly, plen, tmp, prec);
            fmprb_clear(tmp);
        }
        else if (len != 0 && plen == 0)
        {
            _fmprb_vec_zero(vs, len);
        }
        else if (len != 0 && plen == 1)
        {
            for (i = 0; i < len; i++)
                fmprb_set(vs + i, poly + 0);
        }
        return;
    }

    t = _fmprb_vec_init(len);
    u = _fmprb_vec_init(len);

    left = len;

    /* Initial reduction. We allow the polynomial to be larger or smaller
       than the number of points. */
    height = FLINT_BIT_COUNT(plen - 1) - 1;
    tree_height = FLINT_CLOG2(len);
    while (height >= tree_height)
        height--;

    pow = 1L << height;

    for (i = j = 0; i < len; i += pow, j += (pow + 1))
    {
        tlen = ((i + pow) <= len) ? pow : len % pow;
        _fmprb_poly_rem(t + i, poly, plen, tree[height] + j, tlen + 1, prec);
    }

    for (i = height - 1; i >= 0; i--)
    {
        pow = 1L << i;
        left = len;
        pa = tree[i];
        pb = t;
        pc = u;

        while (left >= 2 * pow)
        {
            _fmprb_poly_rem_2(pc, pb, 2 * pow, pa, pow + 1, prec);
            _fmprb_poly_rem_2(pc + pow, pb, 2 * pow, pa + pow + 1, pow + 1, prec);

            pa += 2 * pow + 2;
            pb += 2 * pow;
            pc += 2 * pow;
            left -= 2 * pow;
        }

        if (left > pow)
        {
            _fmprb_poly_rem(pc, pb, left, pa, pow + 1, prec);
            _fmprb_poly_rem(pc + pow, pb, left, pa + pow + 1, left - pow + 1, prec);
        }
        else if (left > 0)
            _fmprb_vec_set(pc, pb, left);

        swap = t;
        t = u;
        u = swap;
    }

    _fmprb_vec_set(vs, t, len);
    _fmprb_vec_clear(t, len);
    _fmprb_vec_clear(u, len);
}
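/* Usage sketch (not part of the library source): the precomputed subproduct
   tree is normally built and freed by the non-_precomp wrapper, so this
   sketch calls _fmprb_poly_evaluate_vec_fast, which is assumed to be that
   wrapper in the same fmprb_poly module. The example name is hypothetical;
   fmprb_printd is assumed from the surrounding fmprb API. */
#include <stdio.h>
#include "fmprb_poly.h"

static void
example_evaluate_vec_fast(void)
{
    long n = 4, plen = 3, prec = 64, i;
    fmprb_ptr poly, xs, ys;

    poly = _fmprb_vec_init(plen);
    xs = _fmprb_vec_init(n);
    ys = _fmprb_vec_init(n);

    /* poly = 1 + 2x + 3x^2 */
    fmprb_set_ui(poly, 1);
    fmprb_set_ui(poly + 1, 2);
    fmprb_set_ui(poly + 2, 3);

    for (i = 0; i < n; i++)
        fmprb_set_si(xs + i, i);   /* points 0, 1, 2, 3 */

    _fmprb_poly_evaluate_vec_fast(ys, poly, plen, xs, n, prec);

    /* expected values: 1, 6, 17, 34 */
    for (i = 0; i < n; i++)
    {
        fmprb_printd(ys + i, 10);
        printf("\n");
    }

    _fmprb_vec_clear(poly, plen);
    _fmprb_vec_clear(xs, n);
    _fmprb_vec_clear(ys, n);
}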
static void
_fmprb_gamma(fmprb_t y, const fmprb_t x, long prec, int inverse)
{
    int reflect;
    long r, n, wp;
    fmprb_t t, u, v;

    if (fmprb_is_exact(x))
    {
        const fmpr_struct * mid = fmprb_midref(x);

        if (fmpr_is_special(mid))
        {
            if (!inverse && fmpr_is_pos_inf(mid))
            {
                fmprb_set(y, x);
            }
            else if (fmpr_is_nan(mid) || fmpr_is_neg_inf(mid) || !inverse)
            {
                fmpr_nan(fmprb_midref(y));
                fmpr_pos_inf(fmprb_radref(y));
            }
            else
            {
                fmprb_zero(y);
            }
            return;
        }
        else
        {
            const fmpz exp = *fmpr_expref(mid);
            const fmpz man = *fmpr_manref(mid);

            /* fast gamma(n), gamma(n/2) or gamma(n/4) */
            if (!COEFF_IS_MPZ(exp) && (exp >= -2) &&
                ((double) fmpz_bits(&man) + exp < prec))
            {
                fmpq_t a;
                fmpq_init(a);
                fmpr_get_fmpq(a, mid);
                fmprb_gamma_fmpq(y, a, prec + 2 * inverse);
                if (inverse)
                    fmprb_ui_div(y, 1, y, prec);
                fmpq_clear(a);
                return;
            }
        }
    }

    wp = prec + FLINT_BIT_COUNT(prec);

    gamma_stirling_choose_param_fmprb(&reflect, &r, &n, x, 1, 0, wp);

    fmprb_init(t);
    fmprb_init(u);
    fmprb_init(v);

    if (reflect)
    {
        /* gamma(x) = (rf(1-x, r) * pi) / (gamma(1-x+r) sin(pi x)) */
        fmprb_sub_ui(t, x, 1, wp);
        fmprb_neg(t, t);
        gamma_rising_fmprb_ui_bsplit(u, t, r, wp);
        fmprb_const_pi(v, wp);
        fmprb_mul(u, u, v, wp);
        fmprb_add_ui(t, t, r, wp);
        gamma_stirling_eval_fmprb(v, t, n, 0, wp);
        fmprb_exp(v, v, wp);
        fmprb_sin_pi(t, x, wp);
        fmprb_mul(v, v, t, wp);
    }
    else
    {
        /* gamma(x) = gamma(x+r) / rf(x,r) */
        fmprb_add_ui(t, x, r, wp);
        gamma_stirling_eval_fmprb(u, t, n, 0, wp);
        fmprb_exp(u, u, prec);
        gamma_rising_fmprb_ui_bsplit(v, x, r, wp);
    }

    if (inverse)
        fmprb_div(y, v, u, prec);
    else
        fmprb_div(y, u, v, prec);

    fmprb_clear(t);
    fmprb_clear(u);
    fmprb_clear(v);
}
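/* Usage sketch (not part of the library source): _fmprb_gamma is static and
   is reached through the public wrappers fmprb_gamma (inverse = 0) and
   fmprb_rgamma (inverse = 1), assumed to be declared in fmprb.h. With an
   exact x = 1/4 the fast gamma(n/4) path applies. The example name is
   hypothetical; fmprb_printd is assumed from the surrounding fmprb API. */
#include <stdio.h>
#include "fmprb.h"

static void
example_gamma(void)
{
    fmprb_t x, y;

    fmprb_init(x);
    fmprb_init(y);

    fmprb_one(x);
    fmprb_mul_2exp_si(x, x, -2);  /* x = 1/4 */

    fmprb_gamma(y, x, 128);       /* gamma(1/4) = 3.62560990... */
    fmprb_printd(y, 20);
    printf("\n");

    fmprb_rgamma(y, x, 128);      /* 1/gamma(1/4) */
    fmprb_printd(y, 20);
    printf("\n");

    fmprb_clear(x);
    fmprb_clear(y);
}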
void
_fmprb_poly_rgamma_series(fmprb_ptr res, fmprb_srcptr h, long hlen, long len, long prec)
{
    int reflect;
    long i, rflen, r, n, wp;
    fmprb_ptr t, u, v;
    fmprb_struct f[2];

    hlen = FLINT_MIN(hlen, len);
    wp = prec + FLINT_BIT_COUNT(prec);

    t = _fmprb_vec_init(len);
    u = _fmprb_vec_init(len);
    v = _fmprb_vec_init(len);
    fmprb_init(f);
    fmprb_init(f + 1);

    /* use zeta values at small integers */
    if (fmprb_is_int(h) && (fmpr_cmpabs_ui(fmprb_midref(h), prec / 2) < 0))
    {
        r = fmpr_get_si(fmprb_midref(h), FMPR_RND_DOWN);

        gamma_lgamma_series_at_one(u, len, wp);
        _fmprb_vec_neg(u, u, len);
        _fmprb_poly_exp_series(t, u, len, len, wp);

        if (r == 1)
        {
            _fmprb_vec_swap(v, t, len);
        }
        else if (r <= 0)
        {
            fmprb_set(f, h);
            fmprb_one(f + 1);
            rflen = FLINT_MIN(len, 2 - r);
            _fmprb_poly_rising_ui_series(u, f, FLINT_MIN(2, len), 1 - r, rflen, wp);
            _fmprb_poly_mullow(v, t, len, u, rflen, len, wp);
        }
        else
        {
            fmprb_one(f);
            fmprb_one(f + 1);
            rflen = FLINT_MIN(len, r);
            _fmprb_poly_rising_ui_series(v, f, FLINT_MIN(2, len), r - 1, rflen, wp);

            /* TODO: use div_series? */
            _fmprb_poly_inv_series(u, v, rflen, len, wp);
            _fmprb_poly_mullow(v, t, len, u, len, len, wp);
        }
    }
    else
    {
        /* otherwise use Stirling series */
        gamma_stirling_choose_param_fmprb(&reflect, &r, &n, h, 1, 0, wp);

        /* rgamma(h) = (gamma(1-h+r) sin(pi h)) / (rf(1-h, r) * pi), h = h0 + t */
        if (reflect)
        {
            /* u = gamma(r+1-h) */
            fmprb_sub_ui(f, h, r + 1, wp);
            fmprb_neg(f, f);
            gamma_stirling_eval_fmprb_series(t, f, n, len, wp);
            _fmprb_poly_exp_series(u, t, len, len, wp);
            for (i = 1; i < len; i += 2)
                fmprb_neg(u + i, u + i);

            /* v = sin(pi h) */
            fmprb_const_pi(f + 1, wp);
            fmprb_mul(f, h, f + 1, wp);
            _fmprb_poly_sin_series(v, f, 2, len, wp);

            _fmprb_poly_mullow(t, u, len, v, len, len, wp);

            /* rf(1-h,r) * pi */
            if (r == 0)
            {
                fmprb_const_pi(u, wp);
                _fmprb_vec_scalar_div(v, t, len, u, wp);
            }
            else
            {
                fmprb_sub_ui(f, h, 1, wp);
                fmprb_neg(f, f);
                fmprb_set_si(f + 1, -1);
                rflen = FLINT_MIN(len, r + 1);
                _fmprb_poly_rising_ui_series(v, f, FLINT_MIN(2, len), r, rflen, wp);
                fmprb_const_pi(u, wp);
                _fmprb_vec_scalar_mul(v, v, rflen, u, wp);

                /* divide by the rising factorial */
                /* TODO: it might be better to use div_series, once it has a
                   good basecase */
                _fmprb_poly_inv_series(u, v, rflen, len, wp);
                _fmprb_poly_mullow(v, t, len, u, len, len, wp);
            }
        }
        else
        {
            /* rgamma(h) = rgamma(h+r) rf(h,r) */
            if (r == 0)
            {
                fmprb_add_ui(f, h, r, wp);
                gamma_stirling_eval_fmprb_series(t, f, n, len, wp);
                _fmprb_vec_neg(t, t, len);
                _fmprb_poly_exp_series(v, t, len, len, wp);
            }
            else
            {
                fmprb_set(f, h);
                fmprb_one(f + 1);
                rflen = FLINT_MIN(len, r + 1);
                _fmprb_poly_rising_ui_series(t, f, FLINT_MIN(2, len), r, rflen, wp);

                fmprb_add_ui(f, h, r, wp);
                gamma_stirling_eval_fmprb_series(v, f, n, len, wp);
                _fmprb_vec_neg(v, v, len);
                _fmprb_poly_exp_series(u, v, len, len, wp);

                _fmprb_poly_mullow(v, u, len, t, rflen, len, wp);
            }
        }
    }

    /* compose with nonconstant part */
    fmprb_zero(t);
    _fmprb_vec_set(t + 1, h + 1, hlen - 1);
    _fmprb_poly_compose_series(res, v, len, t, hlen, len, prec);

    fmprb_clear(f);
    fmprb_clear(f + 1);
    _fmprb_vec_clear(t, len);
    _fmprb_vec_clear(u, len);
    _fmprb_vec_clear(v, len);
}
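/* Usage sketch (not part of the library source): expand 1/gamma(s) to second
   order around s = 3; the constant term should be 1/gamma(3) = 1/2, computed
   through the small-integer branch above. The example name is hypothetical;
   fmprb_printd is assumed from the surrounding fmprb API. */
#include <stdio.h>
#include "fmprb_poly.h"

static void
example_rgamma_series(void)
{
    long len = 3, prec = 64, i;
    fmprb_ptr h, res;

    h = _fmprb_vec_init(2);
    res = _fmprb_vec_init(len);

    fmprb_set_ui(h, 3);      /* expansion point s = 3 */
    fmprb_one(h + 1);        /* series variable */

    _fmprb_poly_rgamma_series(res, h, 2, len, prec);

    for (i = 0; i < len; i++)
    {
        fmprb_printd(res + i, 15);
        printf("\n");
    }

    _fmprb_vec_clear(h, 2);
    _fmprb_vec_clear(res, len);
}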