int pam_get_item(const pam_handle_t *pamh, int item_type, const void **item) { ENTERI(item_type); if (pamh == NULL) RETURNC(PAM_SYSTEM_ERR); switch (item_type) { case PAM_SERVICE: case PAM_USER: case PAM_AUTHTOK: case PAM_OLDAUTHTOK: case PAM_TTY: case PAM_RHOST: case PAM_RUSER: case PAM_CONV: case PAM_USER_PROMPT: case PAM_REPOSITORY: case PAM_AUTHTOK_PROMPT: case PAM_OLDAUTHTOK_PROMPT: case PAM_HOST: *item = pamh->item[item_type]; RETURNC(PAM_SUCCESS); default: RETURNC(PAM_SYMBOL_ERR); } }
/*
 * XSSO 4.2.1 / XSSO 6 page 60
 *
 * Set the value of an item.  String items are duplicated including the
 * terminating NUL; PAM_CONV and PAM_REPOSITORY are shallow struct
 * copies.  A NULL item clears the slot.  The previous value, if any,
 * is poisoned (0xd0) and freed before the new one is installed.
 *
 * Fix: the original computed slot = &pamh->item[item_type] before
 * validating item_type, which forms an out-of-bounds address
 * (undefined behavior) when item_type is invalid.  slot is now
 * assigned only on validated paths.
 */
int
pam_set_item(pam_handle_t *pamh, int item_type, const void *item)
{
	void **slot, *tmp;
	size_t nsize, osize;

	ENTERI(item_type);
	if (pamh == NULL)
		RETURNC(PAM_SYSTEM_ERR);
	osize = nsize = 0;
	switch (item_type) {
	case PAM_SERVICE:
		/* set once only, by pam_start() */
		if (pamh->item[PAM_SERVICE] != NULL)
			RETURNC(PAM_SYSTEM_ERR);
		/* fall through */
	case PAM_USER:
	case PAM_AUTHTOK:
	case PAM_OLDAUTHTOK:
	case PAM_TTY:
	case PAM_RHOST:
	case PAM_RUSER:
	case PAM_USER_PROMPT:
	case PAM_AUTHTOK_PROMPT:
	case PAM_OLDAUTHTOK_PROMPT:
	case PAM_HOST:
		/* String item: sizes are NUL-inclusive lengths. */
		slot = &pamh->item[item_type];
		if (*slot != NULL)
			osize = strlen(*slot) + 1;
		if (item != NULL)
			nsize = strlen(item) + 1;
		break;
	case PAM_REPOSITORY:
		slot = &pamh->item[item_type];
		osize = nsize = sizeof(struct pam_repository);
		break;
	case PAM_CONV:
		slot = &pamh->item[item_type];
		osize = nsize = sizeof(struct pam_conv);
		break;
	default:
		RETURNC(PAM_SYMBOL_ERR);
	}
	/* Poison and release the previous value, if any. */
	if (*slot != NULL) {
		memset(*slot, 0xd0, osize);
		FREE(*slot);
	}
	/* Install a copy of the new value; NULL clears the item. */
	if (item != NULL) {
		if ((tmp = malloc(nsize)) == NULL)
			RETURNC(PAM_BUF_ERR);
		memcpy(tmp, item, nsize);
	} else {
		tmp = NULL;
	}
	*slot = tmp;
	RETURNC(PAM_SUCCESS);
}
/*
 * sinl(x): long double sine.
 *
 * Small |x| (< pi/4) goes straight to the sine kernel; otherwise x is
 * reduced mod pi/2 by __ieee754_rem_pio2l() and the result is computed
 * from the sine or cosine kernel according to the quadrant.
 */
long double
sinl(long double x)
{
	union IEEEl2bits z;
	int e0, s;
	long double y[2];
	long double hi, lo;

	z.e = x;
	s = z.bits.sign;	/* remember the sign, then work on |x| */
	z.bits.sign = 0;

	/* If x = +-0 or x is a subnormal number, then sin(x) = x */
	if (z.bits.exp == 0)
		return (x);

	/* If x = NaN or Inf, then sin(x) = NaN. */
	if (z.bits.exp == 32767)
		return ((x - x) / (x - x));

	ENTERI();

	/* Optimize the case where x is already within range. */
	if (z.e < M_PI_4) {
		hi = __kernel_sinl(z.e, 0, 0);
		RETURNI(s ? -hi : hi);
	}

	/* Reduce x mod pi/2; y[0]+y[1] is the hi/lo remainder. */
	e0 = __ieee754_rem_pio2l(x, y);
	hi = y[0];
	lo = y[1];

	/* Dispatch on the quadrant (low two bits of the octant count). */
	switch (e0 & 3) {
	case 0:
	    hi = __kernel_sinl(hi, lo, 1);
	    break;
	case 1:
	    hi = __kernel_cosl(hi, lo);
	    break;
	case 2:
	    hi = - __kernel_sinl(hi, lo, 1);
	    break;
	case 3:
	    hi = - __kernel_cosl(hi, lo);
	    break;
	}

	RETURNI(hi);
}
/*
 * sinhl(x): long double hyperbolic sine.
 *
 * |x| < 1: odd minimax polynomial (degree depends on LDBL_MANT_DIG).
 * 1 <= |x| < 64: accurate exp(|x|)/2 via k_hexpl(), then the
 * exp(|x|)/2 - 1/(4*(exp(|x|)/2)) identity.
 * Larger |x|: hexpl() up to o_threshold, then deliberate overflow.
 */
long double
sinhl(long double x)
{
	long double hi,lo,x2,x4;
	double dx2,s;
	int16_t ix,jx;

	GET_LDBL_EXPSIGN(jx,x);
	ix = jx&0x7fff;		/* exponent/magnitude bits only */

	/* x is INF or NaN */
	if(ix>=0x7fff) return x+x;

	ENTERI();

	s = 1;
	if (jx<0) s = -1;	/* sinh is odd: carry the sign separately */

	/* |x| < 64, return x, s(x), or accurate s*(exp(|x|)/2-1/exp(|x|)/2) */
	if (ix<0x4005) {		/* |x|<64 */
		if (ix<BIAS-(LDBL_MANT_DIG+1)/2)	/* |x|<TINY */
			if(shuge+x>1)		/* raise inexact unless x == 0 */
				RETURNI(x);	/* sinh(tiny) = tiny with inexact */
		if (ix<0x3fff) {	/* |x|<1: minimax polynomial */
			x2 = x*x;
#if LDBL_MANT_DIG == 64
			x4 = x2*x2;
			RETURNI(((S17*x2 + S15)*x4 + (S13*x2 + S11))*(x2*x*x4*x4) +
			    ((S9*x2 + S7)*x2 + S5)*(x2*x*x2) + S3*(x2*x) + x);
#elif LDBL_MANT_DIG == 113
			/* Low-order coefficients evaluated in double (dx2). */
			dx2 = x2;
			RETURNI(((((((((((S25*dx2 + S23)*dx2 + S21)*x2 + S19)*x2 +
			    S17)*x2 + S15)*x2 + S13)*x2 + S11)*x2 + S9)*x2 +
			    S7)*x2 + S5)* (x2*x*x2) + S3*(x2*x) + x);
#endif
		}
		/* hi + lo ~= exp(|x|)/2 to extra precision. */
		k_hexpl(fabsl(x), &hi, &lo);
		RETURNI(s*(lo - 0.25/(hi + lo) + hi));
	}

	/* |x| in [64, o_threshold], return correctly-overflowing s*exp(|x|)/2 */
	if (fabsl(x) <= o_threshold)
		RETURNI(s*hexpl(fabsl(x)));

	/* |x| > o_threshold, sinh(x) overflow */
	return x*shuge;
}
/*
 * coshl(x): long double hyperbolic cosine.
 *
 * |x| < 1: even minimax polynomial (degree depends on LDBL_MANT_DIG).
 * 1 <= |x| < 64: accurate exp(|x|)/2 via k_hexpl(), then the
 * exp(|x|)/2 + 1/(4*(exp(|x|)/2)) identity.
 * Larger |x|: hexpl() up to o_threshold, then deliberate overflow.
 */
long double
coshl(long double x)
{
	long double hi,lo,x2,x4;
#if LDBL_MANT_DIG == 113
	double dx2;
#endif
	uint16_t ix;

	GET_LDBL_EXPSIGN(ix,x);
	ix &= 0x7fff;		/* cosh is even: drop the sign */

	/* x is INF or NaN */
	if(ix>=0x7fff) return x*x;

	ENTERI();

	/* |x| < 1, return 1 or c(x) */
	if(ix<0x3fff) {
		if (ix<BIAS-(LDBL_MANT_DIG+1)/2)	/* |x| < TINY */
			RETURNI(1+tiny);  /* cosh(tiny) = 1(+) with inexact */
		x2 = x*x;
#if LDBL_MANT_DIG == 64
		x4 = x2*x2;
		RETURNI(((C16*x2 + C14)*x4 + (C12*x2 + C10))*(x4*x4*x2) +
		    ((C8*x2 + C6)*x2 + C4)*x4 + C2*x2 + 1);
#elif LDBL_MANT_DIG == 113
		/* Low-order coefficients evaluated in double (dx2). */
		dx2 = x2;
		RETURNI((((((((((((C26*dx2 + C24)*dx2 + C22)*dx2 + C20)*x2 +
		    C18)*x2 + C16)*x2 + C14)*x2 + C12)*x2 + C10)*x2 + C8)*x2 +
		    C6)*x2 + C4)*(x2*x2) + C2*x2 + 1);
#endif
	}

	/* |x| in [1, 64), return accurate exp(|x|)/2+1/exp(|x|)/2 */
	if (ix < 0x4005) {
		k_hexpl(fabsl(x), &hi, &lo);
		RETURNI(lo + 0.25/(hi + lo) + hi);
	}

	/* |x| in [64, o_threshold], return correctly-overflowing exp(|x|)/2 */
	if (fabsl(x) <= o_threshold)
		RETURNI(hexpl(fabsl(x)));

	/* |x| > o_threshold, cosh(x) overflow */
	RETURNI(huge*huge);
}
/*
 * expl(x): long double exponential, thin wrapper around the shared
 * kernel __k_expl() which produces exp(x) as (hi + lo) * 2**k.
 * This function only filters exceptional inputs and applies the
 * final 2**k scaling.
 */
long double
expl(long double x)
{
	union IEEEl2bits u;
	long double hi, lo, t, twopk;
	int k;
	uint16_t hx, ix;

	DOPRINT_START(&x);

	/* Filter out exceptional cases. */
	u.e = x;
	hx = u.xbits.expsign;
	ix = hx & 0x7fff;
	if (ix >= BIAS + 13) {		/* |x| >= 8192 or x is NaN */
		if (ix == BIAS + LDBL_MAX_EXP) {
			if (hx & 0x8000)	/* x is -Inf or -NaN */
				RETURNP(-1 / x);
			RETURNP(x + x);	/* x is +Inf or +NaN */
		}
		if (x > o_threshold)
			RETURNP(huge * huge);	/* deliberate overflow */
		if (x < u_threshold)
			RETURNP(tiny * tiny);	/* deliberate underflow */
	} else if (ix < BIAS - 114) {	/* |x| < 0x1p-114 */
		RETURN2P(1, x);		/* 1 with inexact iff x != 0 */
	}

	ENTERI();

	twopk = 1;
	/*
	 * NOTE(review): __k_expl() is assumed (from usage here) to return
	 * exp(x) split as hi + lo with scale exponent k — confirm in the
	 * kernel's header.
	 */
	__k_expl(x, &hi, &lo, &k);
	t = SUM2P(hi, lo);

	/* Scale by 2**k. */
	/* XXX sparc64 multiplication is so slow that scalbnl() is faster. */
	if (k >= LDBL_MIN_EXP) {
		if (k == LDBL_MAX_EXP)
			RETURNI(t * 2 * 0x1p16383L);	/* avoid 2**k overflow */
		SET_LDBL_EXPSIGN(twopk, BIAS + k);
		RETURNI(t * twopk);
	} else {
		/* Subnormal result: scale in two steps via twom10000. */
		SET_LDBL_EXPSIGN(twopk, BIAS + k + 10000);
		RETURNI(t * twopk * twom10000);
	}
}
/*
 * OpenPAM extension
 *
 * Temporarily borrow user credentials: snapshot the current effective
 * uid, egid and supplementary groups into PAM data (PAM_SAVED_CRED),
 * then switch to pwd's identity.  Undone by openpam_restore_cred().
 */
int
openpam_borrow_cred(pam_handle_t *pamh, const struct passwd *pwd)
{
	struct pam_saved_cred *scred;
	const void *scredp;
	int r;

	ENTERI(pwd->pw_uid);
	/* Refuse to nest: a second borrow would clobber the snapshot. */
	r = pam_get_data(pamh, PAM_SAVED_CRED, &scredp);
	if (r == PAM_SUCCESS && scredp != NULL) {
		openpam_log(PAM_LOG_DEBUG, "already operating under borrowed credentials");
		RETURNC(PAM_SYSTEM_ERR);
	}
	/* Only root may switch to someone else's credentials. */
	if (geteuid() != 0 && geteuid() != pwd->pw_uid) {
		openpam_log(PAM_LOG_DEBUG, "called with non-zero euid: %d", (int)geteuid());
		RETURNC(PAM_PERM_DENIED);
	}
	/* Snapshot current euid, egid and group list. */
	scred = calloc((size_t)1, sizeof *scred);
	if (scred == NULL)
		RETURNC(PAM_BUF_ERR);
	scred->euid = geteuid();
	scred->egid = getegid();
	r = getgroups(NGROUPS_MAX, scred->groups);
	if (r < 0) {
		FREE(scred);
		RETURNC(PAM_SYSTEM_ERR);
	}
	scred->ngroups = r;
	/*
	 * Save the snapshot BEFORE switching identity, so that
	 * openpam_restore_cred() can recover it even if the switch
	 * below fails part-way through.
	 */
	r = pam_set_data(pamh, PAM_SAVED_CRED, scred, &openpam_free_data);
	if (r != PAM_SUCCESS) {
		FREE(scred);
		RETURNC(r);
	}
	/* Already running as the target user: nothing to switch. */
	if (geteuid() == pwd->pw_uid)
		RETURNC(PAM_SUCCESS);
	/* Set groups first; seteuid() last since it drops privilege. */
	if (initgroups(pwd->pw_name, pwd->pw_gid) < 0 ||
	    setegid(pwd->pw_gid) < 0 || seteuid(pwd->pw_uid) < 0) {
		openpam_restore_cred(pamh);
		RETURNC(PAM_SYSTEM_ERR);
	}
	RETURNC(PAM_SUCCESS);
	/*NOTREACHED*/
}
/*
 * atanhl(x): long double inverse hyperbolic tangent, via the identity
 * atanh(x) = 0.5*log1p(2x/(1-x)), with a rearrangement for |x| < 0.5
 * that keeps the log1p argument small and accurate.
 */
long double
atanhl(long double x)
{
	long double t;
	uint16_t hx, ix;

	ENTERI();
	GET_LDBL_EXPSIGN(hx, x);
	ix = hx & 0x7fff;
	if (ix >= 0x3fff)		/* |x| >= 1, or NaN or misnormal */
		/* atanh(+-1) = +-Inf with divide-by-zero; else NaN. */
		RETURNI(fabsl(x) == 1 ? x / zero : (x - x) / (x - x));
	if (ix < BIAS + EXP_TINY && (huge + x) > zero)
		RETURNI(x);		/* x is tiny; inexact unless x == 0 */

	/* Work on |x|; the sign is restored from hx at the end. */
	SET_LDBL_EXPSIGN(x, ix);
	if (ix < 0x3ffe) {		/* |x| < 0.5, or misnormal */
		t = x+x;
		t = 0.5*log1pl(t+t*x/(one-x));
	} else
		t = 0.5*log1pl((x+x)/(one-x));
	RETURNI((hx & 0x8000) == 0 ? t : -t);
}
/*
 * roundl(x): round to the nearest integer, with halfway cases rounded
 * away from zero (C99 round semantics).  Works on |x| and restores the
 * sign at the end; +-Inf and NaN pass through via x + x.
 */
long double
roundl(long double x)
{
	long double ax, t;
	uint16_t hx;

	GET_LDBL_EXPSIGN(hx, x);
	if ((hx & 0x7fff) == 0x7fff)
		return (x + x);

	ENTERI();

	/* Fold both signs into one path over ax = |x|. */
	ax = (hx & 0x8000) ? -x : x;
	t = floorl(ax);
	if (t - ax <= -0.5L)	/* fractional part >= 0.5: round up */
		t += 1;
	RETURNI((hx & 0x8000) ? -t : t);
}
/*
 * asinhl(x): long double inverse hyperbolic sine.
 *
 * Large |x|:  log(|x|) + ln2 (since sqrt(x*x+1) ~= |x|).
 * 2 <= |x|:   log(2|x| + 1/(sqrt(x*x+1)+|x|)).
 * Small |x|:  log1p(|x| + x*x/(1+sqrt(1+x*x))).
 * asinh is odd; the sign is restored from hx at the end.
 */
long double
asinhl(long double x)
{
	long double t, w;
	uint16_t hx, ix;

	ENTERI();
	GET_LDBL_EXPSIGN(hx, x);
	ix = hx & 0x7fff;
	if (ix >= 0x7fff) RETURNI(x+x); /* x is inf, NaN or misnormal */
	if (ix < BIAS + EXP_TINY) {	/* |x| < TINY, or misnormal */
		if (huge + x > one) RETURNI(x);	/* return x inexact except 0 */
	}
	if (ix >= BIAS + EXP_LARGE) {	/* |x| >= LARGE, or misnormal */
		w = logl(fabsl(x))+ln2;
	} else if (ix >= 0x4000) {	/* LARGE > |x| >= 2.0, or misnormal */
		t = fabsl(x);
		w = logl(2.0*t+one/(sqrtl(x*x+one)+t));
	} else {		/* 2.0 > |x| >= TINY, or misnormal */
		t = x*x;
		w =log1pl(fabsl(x)+t/(one+sqrtl(one+t)));
	}
	RETURNI((hx & 0x8000) == 0 ? w : -w);
}
/*
 * expm1l(x) = exp(x) - 1, computed without the catastrophic
 * cancellation of the naive formula near x = 0 (ld80 variant).
 *
 * Small x (T1 < x < T2): direct polynomial plus an exactly-split
 * x + x*x/2 term.  Otherwise x is reduced to k*ln2 + endpoint[n2] +
 * r1 + r2 and reconstructed from the table tbl[], with several
 * k-dependent orderings of the -1 subtraction to preserve accuracy.
 */
long double
expm1l(long double x)
{
	union IEEEl2bits u, v;
	long double fn, hx2_hi, hx2_lo, q, r, r1, r2, t, twomk, twopk, x_hi;
	long double x_lo, x2, z;
	long double x4;
	int k, n, n2;
	uint16_t hx, ix;

	DOPRINT_START(&x);

	/* Filter out exceptional cases. */
	u.e = x;
	hx = u.xbits.expsign;
	ix = hx & 0x7fff;
	if (ix >= BIAS + 6) {		/* |x| >= 64 or x is NaN */
		if (ix == BIAS + LDBL_MAX_EXP) {
			if (hx & 0x8000)	/* x is -Inf, -NaN or unsupported */
				RETURNP(-1 / x - 1);
			RETURNP(x + x);	/* x is +Inf, +NaN or unsupported */
		}
		if (x > o_threshold)
			RETURNP(huge * huge);
		/*
		 * expm1l() never underflows, but it must avoid
		 * unrepresentable large negative exponents.  We used a
		 * much smaller threshold for large |x| above than in
		 * expl() so as to handle not so large negative exponents
		 * in the same way as large ones here.
		 */
		if (hx & 0x8000)	/* x <= -64 */
			RETURN2P(tiny, -1);	/* good for x < -65ln2 - eps */
	}

	ENTERI();

	if (T1 < x && x < T2) {
		if (ix < BIAS - 74) {	/* |x| < 0x1p-74 (includes pseudos) */
			/* x (rounded) with inexact if x != 0: */
			RETURNPI(x == 0 ? x :
			    (0x1p100 * x + fabsl(x)) * 0x1p-100);
		}

		/* Minimax polynomial for the x^3.. tail (B3 and up). */
		x2 = x * x;
		x4 = x2 * x2;
		q = x4 * (x2 * (x4 *
		    /*
		     * XXX the number of terms is no longer good for
		     * pairwise grouping of all except B3, and the
		     * grouping is no longer from highest down.
		     */
		    (x2 * B12 + (x * B11 + B10)) +
		    (x2 * (x * B9 + B8) + (x * B7 + B6))) +
		    (x * B5 + B4.e)) + x2 * x * B3.e;

		/* Exactly split x so that x + x*x/2 can be added hi/lo. */
		x_hi = (float)x;
		x_lo = x - x_hi;
		hx2_hi = x_hi * x_hi / 2;
		hx2_lo = x_lo * (x + x_hi) / 2;
		if (ix >= BIAS - 7)
			RETURN2PI(hx2_hi + x_hi, hx2_lo + x_lo + q);
		else
			RETURN2PI(x, hx2_lo + q + hx2_hi);
	}

	/* Reduce x to (k*ln2 + endpoint[n2] + r1 + r2). */
	/* Use a specialized rint() to get fn.  Assume round-to-nearest. */
	fn = x * INV_L + 0x1.8p63 - 0x1.8p63;
#if defined(HAVE_EFFICIENT_IRINTL)
	n = irintl(fn);
#elif defined(HAVE_EFFICIENT_IRINT)
	n = irint(fn);
#else
	n = (int)fn;
#endif
	n2 = (unsigned)n % INTERVALS;	/* table index */
	k = n >> LOG2_INTERVALS;	/* power-of-two scale */
	r1 = x - fn * L1;
	r2 = fn * -L2;			/* L1 + L2 ~= ln2/INTERVALS, hi/lo */
	r = r1 + r2;

	/* Prepare scale factor. */
	v.e = 1;
	v.xbits.expsign = BIAS + k;
	twopk = v.e;

	/*
	 * Evaluate lower terms of
	 * expl(endpoint[n2] + r1 + r2) = tbl[n2] * expl(r1 + r2).
	 */
	z = r * r;
	q = r2 + z * (A2 + r * A3) + z * z * (A4 + r * A5) + z * z * z * A6;

	t = (long double)tbl[n2].lo + tbl[n2].hi;

	/* k-dependent reconstruction; grouping via SUM2P is accuracy-critical. */
	if (k == 0) {
		t = SUM2P(tbl[n2].hi - 1, tbl[n2].lo * (r1 + 1) + t * q +
		    tbl[n2].hi * r1);
		RETURNI(t);
	}
	if (k == -1) {
		t = SUM2P(tbl[n2].hi - 2, tbl[n2].lo * (r1 + 1) + t * q +
		    tbl[n2].hi * r1);
		RETURNI(t / 2);
	}
	if (k < -7) {
		/* 2**k is negligible against 1. */
		t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1));
		RETURNI(t * twopk - 1);
	}
	if (k > 2 * LDBL_MANT_DIG - 1) {
		/* 1 is negligible against 2**k * exp(...). */
		t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1));
		if (k == LDBL_MAX_EXP)
			RETURNI(t * 2 * 0x1p16383L - 1);
		RETURNI(t * twopk - 1);
	}

	/* Moderate k: fold the exact -2**-k into the hi/lo sum. */
	v.xbits.expsign = BIAS - k;
	twomk = v.e;

	if (k > LDBL_MANT_DIG - 1)
		t = SUM2P(tbl[n2].hi, tbl[n2].lo - twomk + t * (q + r1));
	else
		t = SUM2P(tbl[n2].hi - twomk, tbl[n2].lo + t * (q + r1));
	RETURNI(t * twopk);
}
/*
 * tanhl(x): long double hyperbolic tangent.
 *
 * |x| < 0.25: odd minimax polynomial.
 * 0.25 <= |x| < 40: from exp(2|x|)/2 via k_hexpl(), using
 *   tanh = 1 - 1/(exp(2|x|)/2 + 0.5), with the extra-precise divl()
 *   path for |x| < 1.5.
 * |x| >= 40: +-1 (with inexact).
 */
long double
tanhl(long double x)
{
	long double hi,lo,s,x2,x4,z;
	double dx2;
	int16_t jx,ix;

	GET_LDBL_EXPSIGN(jx,x);
	ix = jx&0x7fff;

	/* x is INF or NaN */
	if(ix>=0x7fff) {
		if (jx>=0)
			return one/x+one;	/* tanh(+-inf)=+-1 */
		else
			return one/x-one;	/* tanh(NaN) = NaN */
	}

	ENTERI();

	/* |x| < 40 */
	if (ix < 0x4004 || fabsl(x) < 40) {	/* |x|<40 */
		if (__predict_false(ix<BIAS-(LDBL_MANT_DIG+1)/2)) { /* |x|<TINY */
			/* tanh(+-0) = +0; tanh(tiny) = tiny(-+) with inexact: */
			return (x == 0 ? x : (0x1p200 * x - x) * 0x1p-200);
		}
		if (ix<0x3ffd) {		/* |x|<0.25 */
			x2 = x*x;
#if LDBL_MANT_DIG == 64
			x4 = x2*x2;
			RETURNI(((T19*x2 + T17)*x4 + (T15*x2 + T13))*(x2*x*x2*x4*x4) +
			    ((T11*x2 + T9)*x4 + (T7*x2 + T5))*(x2*x*x2) +
			    T3*(x2*x) + x);
#elif LDBL_MANT_DIG == 113
			/* Low-order coefficients evaluated in double (dx2). */
			dx2 = x2;
#if 0
			/* Straight-line alternative, kept for reference. */
			RETURNI(((((((((((((((T33*dx2 + T31)*dx2 + T29)*dx2 +
			    T27)*dx2 + T25)*x2 + T23)*x2 + T21)*x2 + T19)*x2 +
			    T17)*x2 + T15)*x2 + T13)*x2 + T11)*x2 + T9)*x2 +
			    T7)*x2 + T5)* (x2*x*x2) + T3*(x2*x) + x);
#else
			long double q = ((((((((((((((T33*dx2 + T31)*dx2 +
			    T29)*dx2 + T27)*dx2 + T25)*x2 + T23)*x2 + T21)*x2 +
			    T19)*x2 + T17)*x2 + T15)*x2 + T13)*x2 + T11)*x2 +
			    T9)*x2 + T7)*x2 + T5)* (x2*x*x2);
			RETURNI(q + T3*(x2*x) + x);
#endif
#endif
		}
		/* hi + lo ~= exp(2|x|)/2 to extra precision. */
		k_hexpl(2*fabsl(x), &hi, &lo);
		if (ix<0x4001 && fabsl(x) < 1.5)	/* |x|<1.5 */
			z = divl(hi, lo, -0.5, hi, lo, 0.5);
		else
			z = one - one/(lo+0.5+hi);
	/* |x| >= 40, return +-1 */
	} else {
		z = one - tiny;		/* raise inexact flag */
	}
	/* tanh is odd: apply the sign of x. */
	s = 1;
	if (jx<0) s = -1;
	RETURNI(s*z);
}
/*
 * expm1l(x) = exp(x) - 1, computed without the catastrophic
 * cancellation of the naive formula near x = 0 (ld128 variant).
 *
 * Small x (T1 < x < T2): one of two polynomials (C* or D* coefficients,
 * split at T3) plus an exactly-split x + x*x/2 term.  Otherwise x is
 * reduced to k*ln2 + endpoint[n2] + r1 + r2 and reconstructed from the
 * table tbl[], with k-dependent orderings of the -1 subtraction.
 */
long double
expm1l(long double x)
{
	union IEEEl2bits u, v;
	long double hx2_hi, hx2_lo, q, r, r1, t, twomk, twopk, x_hi;
	long double x_lo, x2;
	double dr, dx, fn, r2;
	int k, n, n2;
	uint16_t hx, ix;

	/* Filter out exceptional cases. */
	u.e = x;
	hx = u.xbits.expsign;
	ix = hx & 0x7fff;
	if (ix >= BIAS + 7) {		/* |x| >= 128 or x is NaN */
		if (ix == BIAS + LDBL_MAX_EXP) {
			if (hx & 0x8000)	/* x is -Inf or -NaN */
				return (-1 / x - 1);
			return (x + x);	/* x is +Inf or +NaN */
		}
		if (x > o_threshold)
			return (huge * huge);
		/*
		 * expm1l() never underflows, but it must avoid
		 * unrepresentable large negative exponents.  We used a
		 * much smaller threshold for large |x| above than in
		 * expl() so as to handle not so large negative exponents
		 * in the same way as large ones here.
		 */
		if (hx & 0x8000)	/* x <= -128 */
			return (tiny - 1);	/* good for x < -114ln2 - eps */
	}

	ENTERI();

	if (T1 < x && x < T2) {
		x2 = x * x;
		dx = x;		/* low-order terms evaluated in double */

		if (x < T3) {
			if (ix < BIAS - 113) {	/* |x| < 0x1p-113 */
				/* x (rounded) with inexact if x != 0: */
				RETURNI(x == 0 ? x :
				    (0x1p200 * x + fabsl(x)) * 0x1p-200);
			}
			q = x * x2 * C3 + x2 * x2 * (C4 + x * (C5 + x * (C6 +
			    x * (C7 + x * (C8 + x * (C9 + x * (C10 +
			    x * (C11 + x * (C12 + x * (C13 +
			    dx * (C14 + dx * (C15 + dx * (C16 +
			    dx * (C17 + dx * C18))))))))))))));
		} else {
			q = x * x2 * D3 + x2 * x2 * (D4 + x * (D5 + x * (D6 +
			    x * (D7 + x * (D8 + x * (D9 + x * (D10 +
			    x * (D11 + x * (D12 + x * (D13 +
			    dx * (D14 + dx * (D15 + dx * (D16 +
			    dx * D17)))))))))))));
		}

		/* Exactly split x so that x + x*x/2 can be added hi/lo. */
		x_hi = (float)x;
		x_lo = x - x_hi;
		hx2_hi = x_hi * x_hi / 2;
		hx2_lo = x_lo * (x + x_hi) / 2;
		if (ix >= BIAS - 7)
			RETURNI(hx2_lo + x_lo + q + (hx2_hi + x_hi));
		else
			RETURNI(hx2_lo + q + hx2_hi + x);
	}

	/* Reduce x to (k*ln2 + endpoint[n2] + r1 + r2). */
	/* Use a specialized rint() to get fn.  Assume round-to-nearest. */
	fn = (double)x * INV_L + 0x1.8p52 - 0x1.8p52;
#if defined(HAVE_EFFICIENT_IRINT)
	n = irint(fn);
#else
	n = (int)fn;
#endif
	n2 = (unsigned)n % INTERVALS;	/* table index */
	k = n >> LOG2_INTERVALS;	/* power-of-two scale */
	r1 = x - fn * L1;
	r2 = fn * -L2;			/* L1 + L2 ~= ln2/INTERVALS, hi/lo */
	r = r1 + r2;

	/* Prepare scale factor. */
	v.e = 1;
	v.xbits.expsign = BIAS + k;
	twopk = v.e;

	/*
	 * Evaluate lower terms of
	 * expl(endpoint[n2] + r1 + r2) = tbl[n2] * expl(r1 + r2).
	 */
	dr = r;
	q = r2 + r * r * (A2 + r * (A3 + r * (A4 + r * (A5 + r * (A6 + dr *
	    (A7 + dr * (A8 + dr * (A9 + dr * A10))))))));

	t = tbl[n2].lo + tbl[n2].hi;

	/* k-dependent reconstruction; term ordering is accuracy-critical. */
	if (k == 0) {
		t = tbl[n2].lo * (r1 + 1) + t * q + tbl[n2].hi * r1 +
		    (tbl[n2].hi - 1);
		RETURNI(t);
	}
	if (k == -1) {
		t = tbl[n2].lo * (r1 + 1) + t * q + tbl[n2].hi * r1 +
		    (tbl[n2].hi - 2);
		RETURNI(t / 2);
	}
	if (k < -7) {
		/* 2**k is negligible against 1. */
		t = tbl[n2].lo + t * (q + r1) + tbl[n2].hi;
		RETURNI(t * twopk - 1);
	}
	if (k > 2 * LDBL_MANT_DIG - 1) {
		/* 1 is negligible against 2**k * exp(...). */
		t = tbl[n2].lo + t * (q + r1) + tbl[n2].hi;
		if (k == LDBL_MAX_EXP)
			RETURNI(t * 2 * 0x1p16383L - 1);
		RETURNI(t * twopk - 1);
	}

	/* Moderate k: fold the exact -2**-k into the sum. */
	v.xbits.expsign = BIAS - k;
	twomk = v.e;

	if (k > LDBL_MANT_DIG - 1)
		t = tbl[n2].lo - twomk + t * (q + r1) + tbl[n2].hi;
	else
		t = tbl[n2].lo + t * (q + r1) + (tbl[n2].hi - twomk);
	RETURNI(t * twopk);
}
/*
 * expl(x): long double exponential (ld80, table-driven).
 *
 * x is reduced to k*ln2 + endpoint[n2] + r1 + r2; the result is
 * tbl[n2] * exp(r1 + r2) scaled by 2**k, with a two-step scale for
 * subnormal results.
 */
long double
expl(long double x)
{
	union IEEEl2bits u, v;
	long double q, r, r1, t, twopk, twopkp10000;
	double dr, fn, r2;
	int k, n, n2;
	uint16_t hx, ix;

	/* Filter out exceptional cases. */
	u.e = x;
	hx = u.xbits.expsign;
	ix = hx & 0x7fff;
	if (ix >= BIAS + 13) {		/* |x| >= 8192 or x is NaN */
		if (ix == BIAS + LDBL_MAX_EXP) {
			if (hx & 0x8000)	/* x is -Inf or -NaN */
				return (-1 / x);
			return (x + x);	/* x is +Inf or +NaN */
		}
		if (x > o_threshold)
			return (huge * huge);	/* deliberate overflow */
		if (x < u_threshold)
			return (tiny * tiny);	/* deliberate underflow */
	} else if (ix < BIAS - 114) {	/* |x| < 0x1p-114 */
		return (1 + x);		/* 1 with inexact iff x != 0 */
	}

	ENTERI();

	/* Reduce x to (k*ln2 + endpoint[n2] + r1 + r2). */
	/* Use a specialized rint() to get fn.  Assume round-to-nearest. */
	/* XXX assume no extra precision for the additions, as for trig fns. */
	/* XXX this set of comments is now quadruplicated. */
	fn = (double)x * INV_L + 0x1.8p52 - 0x1.8p52;
#if defined(HAVE_EFFICIENT_IRINT)
	n = irint(fn);
#else
	n = (int)fn;
#endif
	n2 = (unsigned)n % INTERVALS;	/* table index */
	k = n >> LOG2_INTERVALS;	/* power-of-two scale */
	r1 = x - fn * L1;
	r2 = fn * -L2;			/* L1 + L2 ~= ln2/INTERVALS, hi/lo */
	r = r1 + r2;

	/* Prepare scale factors. */
	/* XXX sparc64 multiplication is so slow that scalbnl() is faster. */
	v.e = 1;
	if (k >= LDBL_MIN_EXP) {
		v.xbits.expsign = BIAS + k;
		twopk = v.e;
	} else {
		/* Subnormal result: bias the exponent up by 10000 for now. */
		v.xbits.expsign = BIAS + k + 10000;
		twopkp10000 = v.e;
	}

	/* Evaluate expl(endpoint[n2] + r1 + r2) = tbl[n2] * expl(r1 + r2). */
	dr = r;		/* low-order polynomial terms evaluated in double */
	q = r2 + r * r * (A2 + r * (A3 + r * (A4 + r * (A5 + r * (A6 + dr *
	    (A7 + dr * (A8 + dr * (A9 + dr * A10))))))));
	t = tbl[n2].lo + tbl[n2].hi;
	t = tbl[n2].lo + t * (q + r1) + tbl[n2].hi;

	/* Scale by 2**k. */
	if (k >= LDBL_MIN_EXP) {
		if (k == LDBL_MAX_EXP)
			RETURNI(t * 2 * 0x1p16383L);	/* avoid 2**k overflow */
		RETURNI(t * twopk);
	} else {
		RETURNI(t * twopkp10000 * twom10000);
	}
}
/**
 * Compute the base 2 exponential of x for Intel 80-bit format.
 *
 * Accuracy: Peak error < 0.511 ulp.
 *
 * Method: (equally-spaced tables)
 *
 * Reduce x:
 *	x = 2**k + y, for integer k and |y| <= 1/2.
 *	Thus we have exp2l(x) = 2**k * exp2(y).
 *
 * Reduce y:
 *	y = i/TBLSIZE + z for integer i near y * TBLSIZE.
 *	Thus we have exp2(y) = exp2(i/TBLSIZE) * exp2(z),
 *	with |z| <= 2**-(TBLBITS+1).
 *
 * We compute exp2(i/TBLSIZE) via table lookup and exp2(z) via a
 * degree-6 minimax polynomial with maximum error under 2**-75.6.
 * The table entries each have 104 bits of accuracy, encoded as
 * a pair of double precision values.
 */
long double
exp2l(long double x)
{
	union IEEEl2bits u, v;
	long double r, twopk, twopkp10000, z;
	uint32_t hx, ix, i0;
	int k;

	/* Filter out exceptional cases. */
	u.e = x;
	hx = u.xbits.expsign;
	ix = hx & 0x7fff;
	if (ix >= BIAS + 14) {		/* |x| >= 16384 or x is NaN */
		if (ix == BIAS + LDBL_MAX_EXP) {
			if (hx & 0x8000 && u.xbits.man == 1ULL << 63)
				return (0.0L);	/* x is -Inf */
			return (x + x);	/* x is +Inf, NaN or unsupported */
		}
		if (x >= 16384)
			return (huge * huge);	/* overflow */
		if (x <= -16446)
			return (twom10000 * twom10000);	/* underflow */
	} else if (ix <= BIAS - 66) {	/* |x| < 0x1p-65 (includes pseudos) */
		return (1.0L + x);	/* 1 with inexact */
	}

	ENTERI();

	/*
	 * Reduce x, computing z, i0, and k. The low bits of x + redux
	 * contain the 16-bit integer part of the exponent (k) followed by
	 * TBLBITS fractional bits (i0). We use bit tricks to extract these
	 * as integers, then set z to the remainder.
	 *
	 * Example: Suppose x is 0xabc.123456p0 and TBLBITS is 8.
	 * Then the low-order word of x + redux is 0x000abc12,
	 * We split this into k = 0xabc and i0 = 0x12 (adjusted to
	 * index into the table), then we compute z = 0x0.003456p0.
	 *
	 * XXX If the exponent is negative, the computation of k depends on
	 *     '>>' doing sign extension.
	 */
	u.e = x + redux;
	i0 = u.bits.manl + TBLSIZE / 2;
	k = (int)i0 >> TBLBITS;		/* arithmetic shift: see XXX above */
	i0 = (i0 & (TBLSIZE - 1)) << 1;	/* *2: table holds hi/lo pairs */
	u.e -= redux;
	z = x - u.e;
	v.xbits.man = 1ULL << 63;
	if (k >= LDBL_MIN_EXP) {
		v.xbits.expsign = BIAS + k;
		twopk = v.e;
	} else {
		/* Subnormal result: bias the exponent up by 10000 for now. */
		v.xbits.expsign = BIAS + k + 10000;
		twopkp10000 = v.e;
	}

	/* Compute r = exp2l(y) = exp2lt[i0] * p(z). */
	long double t_hi = tbl[i0];
	long double t_lo = tbl[i0 + 1];
	r = t_lo + (t_hi + t_lo) * z * (P1.e + z * (P2 + z * (P3 + z * (P4 +
	    z * (P5 + z * P6))))) + t_hi;

	/* Scale by 2**k. */
	if (k >= LDBL_MIN_EXP) {
		if (k == LDBL_MAX_EXP)
			RETURNI(r * 2.0 * 0x1p16383L);	/* avoid 2**k overflow */
		RETURNI(r * twopk);
	} else {
		RETURNI(r * twopkp10000 * twom10000);
	}
}
/*
 * cbrtl(x): long double cube root.
 *
 * Reduce x to 1 <= x < 8 with its exponent a multiple of 3, seed the
 * estimate from the s_cbrtf bit trick, refine in double, round the
 * intermediate so t*t is exact, then do one long double Newton step.
 */
long double
cbrtl(long double x)
{
	union IEEEl2bits u, v;
	long double r, s, t, w;
	double dr, dt, dx;
	float ft, fx;
	uint32_t hx;
	uint16_t expsign;
	int k;

	u.e = x;
	expsign = u.xbits.expsign;
	k = expsign & 0x7fff;

	/*
	 * If x = +-Inf, then cbrt(x) = +-Inf.
	 * If x = NaN, then cbrt(x) = NaN.
	 */
	if (k == BIAS + LDBL_MAX_EXP)
		return (x + x);

	ENTERI();
	if (k == 0) {
		/* If x = +-0, then cbrt(x) = +-0. */
		if ((u.bits.manh | u.bits.manl) == 0)
			RETURNI(x);
		/* Adjust subnormal numbers. */
		u.e *= 0x1.0p514;
		k = u.bits.exp;
		k -= BIAS + 514;
	} else
		k -= BIAS;
	/* Normalize u to [1, 2) and keep the unbiased exponent in k. */
	u.xbits.expsign = BIAS;
	v.e = 1;

	/* Make k a multiple of 3 by shifting its remainder into x. */
	x = u.e;
	switch (k % 3) {
	case 1:
	case -2:
		x = 2*x;
		k--;
		break;
	case 2:
	case -1:
		x = 4*x;
		k -= 2;
		break;
	}
	/* v.e = sign(x) * 2**(k/3): the final scale factor. */
	v.xbits.expsign = (expsign & 0x8000) | (BIAS + k / 3);

	/*
	 * The following is the guts of s_cbrtf, with the handling of
	 * special values removed and extra care for accuracy not taken,
	 * but with most of the extra accuracy not discarded.
	 */

	/* ~5-bit estimate: */
	fx = x;
	GET_FLOAT_WORD(hx, fx);
	SET_FLOAT_WORD(ft, ((hx & 0x7fffffff) / 3 + B1));

	/* ~16-bit estimate: */
	dx = x;
	dt = ft;
	dr = dt * dt * dt;
	dt = dt * (dx + dx + dr) / (dx + dr + dr);

	/* ~47-bit estimate: */
	dr = dt * dt * dt;
	dt = dt * (dx + dx + dr) / (dx + dr + dr);

#if LDBL_MANT_DIG == 64
	/*
	 * dt is cbrtl(x) to ~47 bits (after x has been reduced to
	 * 1 <= x < 8).  Round it away from zero to 32 bits (32 so that
	 * t*t is exact, and away from zero for technical reasons).
	 */
	volatile double vd2 = 0x1.0p32;
	volatile double vd1 = 0x1.0p-31;
#define vd ((long double)vd2 + vd1)

	t = dt + vd - 0x1.0p32;
#elif LDBL_MANT_DIG == 113
	/*
	 * Round dt away from zero to 47 bits.  Since we don't trust the 47,
	 * add 2 47-bit ulps instead of 1 to round up.  Rounding is slow and
	 * might be avoidable in this case, since on most machines dt will
	 * have been evaluated in 53-bit precision and the technical reasons
	 * for rounding up might not apply to either case in cbrtl() since
	 * dt is much more accurate than needed.
	 */
	t = dt + 0x2.0p-46 + 0x1.0p60L - 0x1.0p60;
#else
#error "Unsupported long double format"
#endif

	/*
	 * Final step Newton iteration to 64 or 113 bits with
	 * error < 0.667 ulps
	 */
	s=t*t;				/* t*t is exact */
	r=x/s;				/* error <= 0.5 ulps; |r| < |t| */
	w=t+t;				/* t+t is exact */
	r=(r-t)/(w+r);			/* r-t is exact; w+r ~= 3*t */
	t=t+t*r;			/* error <= 0.5 + 0.5/3 + epsilon */

	/* Undo the reduction: apply sign and 2**(k/3). */
	t *= v.e;
	RETURNI(t);
}