/* modified version of bessel_k that accepts a work array instead of
   allocating one. */
double bessel_k_ex(double x, double alpha, double expo, double *bk)
{
    long nb, ncalc, ize;

#ifdef IEEE_754
    /* NaNs propagated correctly */
    if (ISNAN(x) || ISNAN(alpha)) return x + alpha;
#endif
    if (x < 0) {
        ML_ERROR(ME_RANGE, "bessel_k");
        return ML_NAN;
    }
    ize = (long)expo;
    if(alpha < 0)
        alpha = -alpha;
    nb = 1 + (long)floor(alpha); /* nb-1 <= |alpha| < nb */
    alpha -= (double)(nb-1);

    K_bessel(&x, &alpha, &nb, &ize, bk, &ncalc);
    if(ncalc != nb) { /* error input */
        if(ncalc < 0)
            MATHLIB_WARNING4(_("bessel_k(%g): ncalc (=%ld) != nb (=%ld); alpha=%g. Arg. out of range?\n"),
                             x, ncalc, nb, alpha);
        else
            MATHLIB_WARNING2(_("bessel_k(%g,nu=%g): precision lost in result\n"),
                             x, alpha+(double)nb-1);
    }
    x = bk[nb-1];
    return x;
}
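/* Usage sketch (illustrative, not part of the library): bessel_k_ex() expects
 * the caller to pass a work array `bk` of at least 1 + floor(|alpha|) doubles,
 * i.e. the same size bessel_k() below allocates for itself.  The wrapper name
 * `besselK_with_work` is hypothetical.
 *
 *   double besselK_with_work(double x, double nu)
 *   {
 *       long n = 1 + (long) floor(fabs(nu));             // required work size
 *       double *work = (double *) calloc((size_t) n, sizeof(double));
 *       if (!work) return ML_NAN;                        // allocation failed
 *       double res = bessel_k_ex(x, nu, 1.0, work);      // expo = 1: unscaled
 *       free(work);
 *       return res;
 *   }
 */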
attribute_hidden
double pbeta_raw(double x, double a, double b, int lower_tail, int log_p)
{
    // treat limit cases correctly here:
    if(a == 0 || b == 0 || !R_FINITE(a) || !R_FINITE(b)) {
        // NB:  0 < x < 1 :
        if(a == 0 && b == 0) // point mass 1/2 at each of {0,1} :
            return (log_p ? -M_LN2 : 0.5);
        if (a == 0 || a/b == 0) // point mass 1 at 0 ==> P(X <= x) = 1, all x > 0
            return R_DT_1;
        if (b == 0 || b/a == 0) // point mass 1 at 1 ==> P(X <= x) = 0, all x < 1
            return R_DT_0;
        // else, remaining case:  a = b = Inf : point mass 1 at 1/2
        if (x < 0.5) return R_DT_0; else return R_DT_1;
    }
    // Now:  0 < a < Inf;  0 < b < Inf

    double x1 = 0.5 - x + 0.5, w, wc;
    int ierr;
    //====
    bratio(a, b, x, x1, &w, &wc, &ierr, log_p); /* -> ./toms708.c */
    //====
    // ierr in {10,14} <==> bgrat() error code ierr-10 in 1:4; for 1 and 4, warned *there*
    if(ierr && ierr != 11 && ierr != 14)
        MATHLIB_WARNING4(_("pbeta_raw(%g, a=%g, b=%g, ..) -> bratio() gave error code %d"),
                         x, a, b, ierr);
    return lower_tail ? w : wc;
} /* pbeta_raw() */
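/* Limit-case sketch (illustrative): assuming the public pbeta(x, a, b,
 * lower_tail, log_p) permits zero/infinite shape parameters and forwards to
 * pbeta_raw() for 0 < x < 1 (as in current R), the boundary rules above give:
 *
 *   pbeta(0.3, 0, 0, 1, 0)                // ~0.5 : mass 1/2 at 0 and at 1
 *   pbeta(0.3, 0, 2, 1, 0)                // 1.0  : point mass at 0
 *   pbeta(0.3, 2, 0, 1, 0)                // 0.0  : point mass at 1
 *   pbeta(0.3, INFINITY, INFINITY, 1, 0)  // 0.0  : mass at 1/2, here x < 0.5
 */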
/* modified version of bessel_j that accepts a work array instead of
   allocating one. */
double bessel_j_ex(double x, double alpha, double *bj)
{
    long nb, ncalc;
    double na;

#ifdef IEEE_754
    /* NaNs propagated correctly */
    if (ISNAN(x) || ISNAN(alpha)) return x + alpha;
#endif
    if (x < 0) {
        ML_ERROR(ME_RANGE, "bessel_j");
        return ML_NAN;
    }
    na = floor(alpha);
    if (alpha < 0) {
        /* Using Abramowitz & Stegun 9.1.2
         * this may not be quite optimal (CPU and accuracy wise) */
        return(bessel_j_ex(x, -alpha, bj) * cos(M_PI * alpha) +
               ((alpha == na) ? 0 : bessel_y_ex(x, -alpha, bj) * sin(M_PI * alpha)));
    }
    nb = 1 + (long)na; /* nb-1 <= alpha < nb */
    alpha -= (nb-1);

    J_bessel(&x, &alpha, &nb, bj, &ncalc);
    if(ncalc != nb) { /* error input */
        if(ncalc < 0)
            MATHLIB_WARNING4(_("bessel_j(%g): ncalc (=%ld) != nb (=%ld); alpha=%g. Arg. out of range?\n"),
                             x, ncalc, nb, alpha);
        else
            MATHLIB_WARNING2(_("bessel_j(%g,nu=%g): precision lost in result\n"),
                             x, alpha+nb-1);
    }
    x = bj[nb-1];
    return x;
}
// unused now from R
double bessel_j(double x, double alpha)
{
    int nb, ncalc;
    double na, *bj;
#ifndef MATHLIB_STANDALONE
    const void *vmax;
#endif

#ifdef IEEE_754
    /* NaNs propagated correctly */
    if (ISNAN(x) || ISNAN(alpha)) return x + alpha;
#endif
    if (x < 0) {
        ML_ERROR(ME_RANGE, "bessel_j");
        return ML_NAN;
    }
    na = floor(alpha);
    if (alpha < 0) {
        /* Using Abramowitz & Stegun 9.1.2
         * this may not be quite optimal (CPU and accuracy wise) */
        return(((alpha - na == 0.5) ? 0 : bessel_j(x, -alpha) * cospi(alpha)) +
               ((alpha == na) ? 0 : bessel_y(x, -alpha) * sinpi(alpha)));
    }
    else if (alpha > 1e7) {
        MATHLIB_WARNING("besselJ(x, nu): nu=%g too large for bessel_j() algorithm",
                        alpha);
        return ML_NAN;
    }
    nb = 1 + (int)na; /* nb-1 <= alpha < nb */
    alpha -= (double)(nb-1);
#ifdef MATHLIB_STANDALONE
    bj = (double *) calloc(nb, sizeof(double));
#ifndef _RENJIN
    if (!bj) MATHLIB_ERROR("%s", _("bessel_j allocation error"));
#endif
#else
    vmax = vmaxget();
    bj = (double *) R_alloc((size_t) nb, sizeof(double));
#endif
    J_bessel(&x, &alpha, &nb, bj, &ncalc);
    if(ncalc != nb) { /* error input */
        if(ncalc < 0)
            MATHLIB_WARNING4(_("bessel_j(%g): ncalc (=%d) != nb (=%d); alpha=%g. Arg. out of range?\n"),
                             x, ncalc, nb, alpha);
        else
            MATHLIB_WARNING2(_("bessel_j(%g,nu=%g): precision lost in result\n"),
                             x, alpha+(double)nb-1);
    }
    x = bj[nb-1];
#ifdef MATHLIB_STANDALONE
    free(bj);
#else
    vmaxset(vmax);
#endif
    return x;
}
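/* Numerical sketch of the Abramowitz & Stegun 9.1.2 identity used above (and
 * in bessel_y / bessel_j_ex) for negative order; `x` and `nu` are arbitrary
 * illustrative test values:
 *
 *   J_{-nu}(x) = J_{nu}(x) * cos(pi*nu) - Y_{nu}(x) * sin(pi*nu),   nu > 0
 *
 *   double x = 2.5, nu = 0.3;                     // nu non-integer, x > 0
 *   double lhs = bessel_j(x, -nu);
 *   double rhs = bessel_j(x, nu) * cos(M_PI * nu)
 *              - bessel_y(x, nu) * sin(M_PI * nu);
 *   // lhs and rhs should agree to nearly double precision
 */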
double bessel_y(double x, double alpha)
{
    long nb, ncalc;
    double na, *by;
#ifndef MATHLIB_STANDALONE
    const void *vmax;
#endif

#ifdef IEEE_754
    /* NaNs propagated correctly */
    if (ISNAN(x) || ISNAN(alpha)) return x + alpha;
#endif
    if (x < 0) {
        ML_ERROR(ME_RANGE, "bessel_y");
        return ML_NAN;
    }
    na = floor(alpha);
    if (alpha < 0) {
        /* Using Abramowitz & Stegun 9.1.2
         * this may not be quite optimal (CPU and accuracy wise) */
        return(bessel_y(x, -alpha) * cos(M_PI * alpha) -
               ((alpha == na) ? 0 : bessel_j(x, -alpha) * sin(M_PI * alpha)));
    }
    nb = 1 + (long)na; /* nb-1 <= alpha < nb */
    alpha -= (nb-1);
#ifdef MATHLIB_STANDALONE
    by = (double *) calloc(nb, sizeof(double));
    if (!by) MATHLIB_ERROR("%s", _("bessel_y allocation error"));
#else
    vmax = vmaxget();
    by = (double *) R_alloc((size_t) nb, sizeof(double));
#endif
    Y_bessel(&x, &alpha, &nb, by, &ncalc);
    if(ncalc != nb) { /* error input */
        if(ncalc == -1)
            return ML_POSINF;
        else if(ncalc < -1)
            MATHLIB_WARNING4(_("bessel_y(%g): ncalc (=%ld) != nb (=%ld); alpha=%g. Arg. out of range?\n"),
                             x, ncalc, nb, alpha);
        else /* ncalc >= 0 */
            MATHLIB_WARNING2(_("bessel_y(%g,nu=%g): precision lost in result\n"),
                             x, alpha+nb-1);
    }
    x = by[nb-1];
#ifdef MATHLIB_STANDALONE
    free(by);
#else
    vmaxset(vmax);
#endif
    return x;
}
double bessel_k(double x, double alpha, double expo)
{
    long nb, ncalc, ize;
    double *bk;
#ifndef MATHLIB_STANDALONE
    const void *vmax;
#endif

#ifdef IEEE_754
    /* NaNs propagated correctly */
    if (ISNAN(x) || ISNAN(alpha)) return x + alpha;
#endif
    if (x < 0) {
        ML_ERROR(ME_RANGE, "bessel_k");
        return ML_NAN;
    }
    ize = (long)expo;
    if(alpha < 0)
        alpha = -alpha;
    nb = 1 + (long)floor(alpha); /* nb-1 <= |alpha| < nb */
    alpha -= (double)(nb-1);
#ifdef MATHLIB_STANDALONE
    bk = (double *) calloc(nb, sizeof(double));
    if (!bk) MATHLIB_ERROR("%s", _("bessel_k allocation error"));
#else
    vmax = vmaxget();
    bk = (double *) R_alloc((size_t) nb, sizeof(double));
#endif
    K_bessel(&x, &alpha, &nb, &ize, bk, &ncalc);
    if(ncalc != nb) { /* error input */
        if(ncalc < 0)
            MATHLIB_WARNING4(_("bessel_k(%g): ncalc (=%ld) != nb (=%ld); alpha=%g. Arg. out of range?\n"),
                             x, ncalc, nb, alpha);
        else
            MATHLIB_WARNING2(_("bessel_k(%g,nu=%g): precision lost in result\n"),
                             x, alpha+(double)nb-1);
    }
    x = bk[nb-1];
#ifdef MATHLIB_STANDALONE
    free(bk);
#else
    vmaxset(vmax);
#endif
    return x;
}
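/* Scaling sketch (illustrative): `expo` is passed to K_bessel() as `ize`.
 * Assuming the same convention as R's besselK(expon.scaled=), expo = 1 returns
 * K_nu(x) and expo = 2 returns exp(x) * K_nu(x), which is useful for large x
 * where K_nu(x) itself may underflow:
 *
 *   double x = 1000.0, nu = 1.5;
 *   double plain  = bessel_k(x, nu, 1.0);   // K_nu(x); may underflow to 0 here
 *   double scaled = bessel_k(x, nu, 2.0);   // exp(x) * K_nu(x); stays finite
 */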
/* Called from R: modified version of bessel_j(), accepting a work array
 * instead of allocating one. */
double bessel_j_ex(double x, double alpha, double *bj)
{
    int nb, ncalc;
    double na;

#ifdef IEEE_754
    /* NaNs propagated correctly */
    if (ISNAN(x) || ISNAN(alpha)) return x + alpha;
#endif
    if (x < 0) {
        ML_ERROR(ME_RANGE, "bessel_j");
        return ML_NAN;
    }
    na = floor(alpha);
    if (alpha < 0) {
        /* Using Abramowitz & Stegun 9.1.2
         * this may not be quite optimal (CPU and accuracy wise) */
        return(((alpha - na == 0.5) ? 0 : bessel_j_ex(x, -alpha, bj) * cospi(alpha)) +
               ((alpha == na) ? 0 : bessel_y_ex(x, -alpha, bj) * sinpi(alpha)));
    }
    else if (alpha > 1e7) {
        MATHLIB_WARNING("besselJ(x, nu): nu=%g too large for bessel_j() algorithm",
                        alpha);
        return ML_NAN;
    }
    nb = 1 + (int)na; /* nb-1 <= alpha < nb */
    alpha -= (double)(nb-1); // ==> alpha' in [0, 1)
    J_bessel(&x, &alpha, &nb, bj, &ncalc);
    if(ncalc != nb) { /* error input */
        if(ncalc < 0)
            MATHLIB_WARNING4(_("bessel_j(%g): ncalc (=%d) != nb (=%d); alpha=%g. Arg. out of range?\n"),
                             x, ncalc, nb, alpha);
        else
            MATHLIB_WARNING2(_("bessel_j(%g,nu=%g): precision lost in result\n"),
                             x, alpha+(double)nb-1);
    }
    x = bj[nb-1];
    return x;
}
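/* Reuse sketch (illustrative): the point of the *_ex variants is to let the
 * caller allocate the work array once and reuse it, e.g. when evaluating
 * J_nu at many x values for a fixed order.  The loop below is hypothetical;
 * `n`, `xs`, `out`, and `nu` are assumed to be supplied by the caller, with
 * nu >= 0 here so only the bessel_j_ex path is exercised.
 *
 *   long nw = 1 + (long) floor(nu);                 // work size for this order
 *   double *work = (double *) calloc((size_t) nw, sizeof(double));
 *   if (work) {
 *       for (int i = 0; i < n; i++)
 *           out[i] = bessel_j_ex(xs[i], nu, work);  // no per-call allocation
 *       free(work);
 *   }
 */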