static void qemu_f128M_to_extF80M(const float128_t *from, extFloat80_t *to)
{
    float128 qfrom;
    floatx80 qto;

    qfrom = soft_to_qemu128(*from);
    qto = float128_to_floatx80(qfrom, &qsf);
    *to = qemu_to_soft80(qto);
}
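/*
 * A possible companion wrapper for the opposite direction, sketched here for
 * illustration only.  It assumes that helpers soft_to_qemu80() and
 * qemu_to_soft128() exist alongside the soft_to_qemu128()/qemu_to_soft80()
 * pair used above, and that `qsf` is the shared float_status used on the
 * QEMU side; floatx80_to_float128() is the QEMU softfloat conversion.
 */
static void qemu_extF80M_to_f128M(const extFloat80_t *from, float128_t *to)
{
    floatx80 qfrom;   /* QEMU-side 80-bit value */
    float128 qto;     /* QEMU-side 128-bit result */

    qfrom = soft_to_qemu80(*from);              /* assumed helper */
    qto = floatx80_to_float128(qfrom, &qsf);
    *to = qemu_to_soft128(qto);                 /* assumed helper */
}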
static floatx80 sincos_approximation(int neg, float128 r, Bit64u quotient, float_status_t &status)
{
    if (quotient & 0x1) {
        r = poly_cos(r, status);
        neg = 0;
    } else {
        r = poly_sin(r, status);
    }

    floatx80 result = float128_to_floatx80(r, status);
    if (quotient & 0x2)
        neg = ! neg;

    if (neg)
        floatx80_chs(result);

    return result;
}
static floatx80 sincos_approximation(int neg, float128 r, UINT64 quotient)
{
    if (quotient & 0x1) {
        r = poly_cos(r);
        neg = 0;
    } else {
        r = poly_sin(r);
    }

    floatx80 result = float128_to_floatx80(r);
    if (quotient & 0x2)
        neg = ! neg;

    if (neg)
        result = floatx80_chs(result);

    return result;
}
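/*
 * Reference-only sketch of the quadrant dispatch in the two variants above,
 * using ordinary libm long doubles instead of the float128 polynomials.
 * After argument reduction the callers work with a remainder r and a quotient
 * q such that the original argument is approximately r + q*(pi/2), so
 *   sin(x) = sin(r), cos(r), -sin(r), -cos(r)   for q mod 4 = 0, 1, 2, 3.
 * That is exactly what the two quotient bits select: bit 0 switches to the
 * cosine polynomial (and clears `neg`, cosine being an even function), and
 * bit 1 flips the sign of the result.  This sketch is an assumption-laden
 * illustration, not part of the library.
 */
#include <math.h>

static long double sincos_reference(int neg, long double r, unsigned long long quotient)
{
    long double result;

    if (quotient & 0x1) {
        result = cosl(r);              /* odd quadrant: use cosine */
        neg = 0;
    } else {
        result = sinl(r);              /* even quadrant: use sine */
    }

    if (quotient & 0x2) neg = ! neg;   /* quadrants 2 and 3: negate */

    return neg ? -result : result;
}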
int ftan(floatx80 &a, float_status_t &status)
{
    Bit64u aSig0, aSig1 = 0;
    Bit32s aExp, zExp, expDiff;
    int aSign, zSign;
    int q = 0;

    // handle unsupported extended double-precision floating encodings
    if (floatx80_is_unsupported(a)) {
        goto invalid;
    }

    aSig0 = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    /* invalid argument */
    if (aExp == 0x7FFF) {
        if ((Bit64u) (aSig0<<1)) {
            a = propagateFloatx80NaN(a, status);
            return 0;
        }

    invalid:
        float_raise(status, float_flag_invalid);
        a = floatx80_default_nan;
        return 0;
    }

    if (aExp == 0) {
        if (aSig0 == 0) return 0;
        float_raise(status, float_flag_denormal);
        /* handle pseudo denormals */
        if (! (aSig0 & BX_CONST64(0x8000000000000000))) {
            float_raise(status, float_flag_inexact | float_flag_underflow);
            return 0;
        }
        normalizeFloatx80Subnormal(aSig0, &aExp, &aSig0);
    }

    zSign = aSign;
    zExp = EXP_BIAS;
    expDiff = aExp - zExp;

    /* argument is out-of-range */
    if (expDiff >= 63) return -1;

    float_raise(status, float_flag_inexact);

    if (expDiff < -1) {    // doesn't require reduction
        if (expDiff <= -68) {
            a = packFloatx80(aSign, aExp, aSig0);
            return 0;
        }
        zExp = aExp;
    }
    else {
        q = reduce_trig_arg(expDiff, zSign, aSig0, aSig1);
    }

    /* **************************** */
    /* argument reduction completed */
    /* **************************** */

    /* using float128 for approximation */
    float128 r = normalizeRoundAndPackFloat128(0, zExp-0x10, aSig0, aSig1, status);

    float128 sin_r = poly_sin(r, status);
    float128 cos_r = poly_cos(r, status);

    if (q & 0x1) {
        r = float128_div(cos_r, sin_r, status);
        zSign = ! zSign;
    } else {
        r = float128_div(sin_r, cos_r, status);
    }

    a = float128_to_floatx80(r, status);
    if (zSign)
        floatx80_chs(a);

    return 0;
}
int floatx80_ftan(floatx80 &a)
{
    UINT64 aSig0, aSig1 = 0;
    INT32 aExp, zExp, expDiff;
    int aSign, zSign;
    int q = 0;

    aSig0 = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    /* invalid argument */
    if (aExp == 0x7FFF) {
        if ((UINT64) (aSig0<<1)) {
            a = propagateFloatx80NaNOneArg(a);
            return 0;
        }

        float_raise(float_flag_invalid);
        a = floatx80_default_nan;
        return 0;
    }

    if (aExp == 0) {
        if (aSig0 == 0) return 0;
//      float_raise(float_flag_denormal);
        /* handle pseudo denormals */
        if (! (aSig0 & U64(0x8000000000000000))) {
            float_raise(float_flag_inexact | float_flag_underflow);
            return 0;
        }
        normalizeFloatx80Subnormal(aSig0, &aExp, &aSig0);
    }

    zSign = aSign;
    zExp = EXP_BIAS;
    expDiff = aExp - zExp;

    /* argument is out-of-range */
    if (expDiff >= 63) return -1;

    float_raise(float_flag_inexact);

    if (expDiff < -1) {    // doesn't require reduction
        if (expDiff <= -68) {
            a = packFloatx80(aSign, aExp, aSig0);
            return 0;
        }
        zExp = aExp;
    }
    else {
        q = reduce_trig_arg(expDiff, zSign, aSig0, aSig1);
    }

    /* **************************** */
    /* argument reduction completed */
    /* **************************** */

    /* using float128 for approximation */
    float128 r = normalizeRoundAndPackFloat128(0, zExp-0x10, aSig0, aSig1);

    float128 sin_r = poly_sin(r);
    float128 cos_r = poly_cos(r);

    if (q & 0x1) {
        r = float128_div(cos_r, sin_r);
        zSign = ! zSign;
    } else {
        r = float128_div(sin_r, cos_r);
    }

    a = float128_to_floatx80(r);
    if (zSign)
        a = floatx80_chs(a);

    return 0;
}
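/*
 * Reference-only sketch of the final step of the tangent routines above,
 * again with libm long doubles instead of float128.  With the argument
 * reduced to x ~= r + q*(pi/2):
 *   tan(x) =  sin(r)/cos(r)   when q is even,
 *   tan(x) = -cos(r)/sin(r)   when q is odd,
 * which is why the code divides cos_r by sin_r and flips zSign when (q & 1).
 * The -1 return value above signals an argument too large to reduce
 * (exponent difference >= 63); a caller emulating FPTAN would typically
 * report that condition (for example through the FPU status word C2 bit)
 * instead of storing a result -- stated here as an assumption about the
 * surrounding emulator, not as this library's API.
 */
#include <math.h>

static long double ftan_reference(long double r, unsigned long long q)
{
    long double sin_r = sinl(r), cos_r = cosl(r);
    return (q & 0x1) ? -(cos_r / sin_r) : (sin_r / cos_r);
}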
floatx80 fyl2xp1(floatx80 a, floatx80 b)
{
    INT32 aExp, bExp;
    UINT64 aSig, bSig, zSig0, zSig1, zSig2;
    int aSign, bSign;

    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);
    bSig = extractFloatx80Frac(b);
    bExp = extractFloatx80Exp(b);
    bSign = extractFloatx80Sign(b);
    int zSign = aSign ^ bSign;

    if (aExp == 0x7FFF) {
        if ((UINT64) (aSig<<1)
             || ((bExp == 0x7FFF) && (UINT64) (bSig<<1)))
        {
            return propagateFloatx80NaN(a, b);
        }
        if (aSign) {
   invalid:
            float_raise(float_flag_invalid);
            return floatx80_default_nan;
        }
        else {
            if (bExp == 0) {
                if (bSig == 0) goto invalid;
                float_raise(float_flag_denormal);
            }
            return packFloatx80(bSign, 0x7FFF, U64(0x8000000000000000));
        }
    }
    if (bExp == 0x7FFF)
    {
        if ((UINT64) (bSig<<1))
            return propagateFloatx80NaN(a, b);

        if (aExp == 0) {
            if (aSig == 0) goto invalid;
            float_raise(float_flag_denormal);
        }

        return packFloatx80(zSign, 0x7FFF, U64(0x8000000000000000));
    }
    if (aExp == 0) {
        if (aSig == 0) {
            if (bSig && (bExp == 0)) float_raise(float_flag_denormal);
            return packFloatx80(zSign, 0, 0);
        }
        float_raise(float_flag_denormal);
        normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
    }
    if (bExp == 0) {
        if (bSig == 0) return packFloatx80(zSign, 0, 0);
        float_raise(float_flag_denormal);
        normalizeFloatx80Subnormal(bSig, &bExp, &bSig);
    }

    float_raise(float_flag_inexact);

    if (aSign && aExp >= 0x3FFF)
        return a;

    if (aExp >= 0x3FFC) // big argument
    {
        return fyl2x(floatx80_add(a, floatx80_one), b);
    }

    // handle tiny argument
    if (aExp < EXP_BIAS-70)
    {
        // first order approximation, return (a*b)/ln(2)
        INT32 zExp = aExp + FLOAT_LN2INV_EXP - 0x3FFE;

        mul128By64To192(FLOAT_LN2INV_HI, FLOAT_LN2INV_LO, aSig, &zSig0, &zSig1, &zSig2);
        if (0 < (INT64) zSig0) {
            shortShift128Left(zSig0, zSig1, 1, &zSig0, &zSig1);
            --zExp;
        }

        zExp = zExp + bExp - 0x3FFE;
        mul128By64To192(zSig0, zSig1, bSig, &zSig0, &zSig1, &zSig2);
        if (0 < (INT64) zSig0) {
            shortShift128Left(zSig0, zSig1, 1, &zSig0, &zSig1);
            --zExp;
        }

        return roundAndPackFloatx80(80, aSign ^ bSign, zExp, zSig0, zSig1);
    }

    /* ******************************** */
    /* using float128 for approximation */
    /* ******************************** */

    shift128Right(aSig<<1, 0, 16, &zSig0, &zSig1);
    float128 x = packFloat128(aSign, aExp, zSig0, zSig1);
    x = poly_l2p1(x);
    return floatx80_mul(b, float128_to_floatx80(x));
}
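/*
 * Reference-only sketch of what fyl2xp1() computes: b * log2(a + 1), the x87
 * FYL2XP1 operation, shown with libm long doubles.  Using log1p() keeps
 * precision for the tiny arguments that the code above handles with the
 * first-order term (a*b)/ln(2), since log2(1 + a) ~= a/ln(2) when |a| is
 * small.  Illustration only, not part of the library.
 */
#include <math.h>

static long double fyl2xp1_reference(long double a, long double b)
{
    return b * (log1pl(a) / logl(2.0L));
}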
static floatx80 fyl2x(floatx80 a, floatx80 b)
{
    UINT64 aSig = extractFloatx80Frac(a);
    INT32 aExp = extractFloatx80Exp(a);
    int aSign = extractFloatx80Sign(a);
    UINT64 bSig = extractFloatx80Frac(b);
    INT32 bExp = extractFloatx80Exp(b);
    int bSign = extractFloatx80Sign(b);

    int zSign = bSign ^ 1;

    if (aExp == 0x7FFF) {
        if ((UINT64) (aSig<<1)
             || ((bExp == 0x7FFF) && (UINT64) (bSig<<1)))
        {
            return propagateFloatx80NaN(a, b);
        }
        if (aSign) {
   invalid:
            float_raise(float_flag_invalid);
            return floatx80_default_nan;
        }
        else {
            if (bExp == 0) {
                if (bSig == 0) goto invalid;
                float_raise(float_flag_denormal);
            }
            return packFloatx80(bSign, 0x7FFF, U64(0x8000000000000000));
        }
    }
    if (bExp == 0x7FFF) {
        if ((UINT64) (bSig<<1)) return propagateFloatx80NaN(a, b);
        if (aSign && (UINT64)(aExp | aSig)) goto invalid;
        if (aSig && (aExp == 0)) float_raise(float_flag_denormal);
        if (aExp < 0x3FFF) {
            return packFloatx80(zSign, 0x7FFF, U64(0x8000000000000000));
        }
        if (aExp == 0x3FFF && ((UINT64) (aSig<<1) == 0)) goto invalid;
        return packFloatx80(bSign, 0x7FFF, U64(0x8000000000000000));
    }
    if (aExp == 0) {
        if (aSig == 0) {
            if ((bExp | bSig) == 0) goto invalid;
            float_raise(float_flag_divbyzero);
            return packFloatx80(zSign, 0x7FFF, U64(0x8000000000000000));
        }
        if (aSign) goto invalid;
        float_raise(float_flag_denormal);
        normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
    }
    if (aSign) goto invalid;
    if (bExp == 0) {
        if (bSig == 0) {
            if (aExp < 0x3FFF) return packFloatx80(zSign, 0, 0);
            return packFloatx80(bSign, 0, 0);
        }
        float_raise(float_flag_denormal);
        normalizeFloatx80Subnormal(bSig, &bExp, &bSig);
    }
    if (aExp == 0x3FFF && ((UINT64) (aSig<<1) == 0))
        return packFloatx80(bSign, 0, 0);

    float_raise(float_flag_inexact);

    int ExpDiff = aExp - 0x3FFF;
    aExp = 0;
    if (aSig >= SQRT2_HALF_SIG) {
        ExpDiff++;
        aExp--;
    }

    /* ******************************** */
    /* using float128 for approximation */
    /* ******************************** */

    UINT64 zSig0, zSig1;
    shift128Right(aSig<<1, 0, 16, &zSig0, &zSig1);
    float128 x = packFloat128(0, aExp+0x3FFF, zSig0, zSig1);
    x = poly_l2(x);
    x = float128_add(x, int64_to_float128((INT64) ExpDiff));
    return floatx80_mul(b, float128_to_floatx80(x));
}
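/*
 * Reference-only sketch of what fyl2x() computes: b * log2(a), the x87 FYL2X
 * operation.  The code above splits a into 2^ExpDiff * m with m brought close
 * to 1 (the SQRT2_HALF_SIG test re-centers m into roughly [sqrt(2)/2, sqrt(2))),
 * evaluates log2(m) with the poly_l2() polynomial, and adds ExpDiff back in,
 * since log2(2^ExpDiff * m) = ExpDiff + log2(m).  Illustration only.
 */
#include <math.h>

static long double fyl2x_reference(long double a, long double b)
{
    return b * log2l(a);
}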
floatx80 fpatan(floatx80 a, floatx80 b, float_status_t &status)
{
    // handle unsupported extended double-precision floating encodings
    if (floatx80_is_unsupported(a) || floatx80_is_unsupported(b)) {
        float_raise(status, float_flag_invalid);
        return floatx80_default_nan;
    }

    Bit64u aSig = extractFloatx80Frac(a);
    Bit32s aExp = extractFloatx80Exp(a);
    int aSign = extractFloatx80Sign(a);
    Bit64u bSig = extractFloatx80Frac(b);
    Bit32s bExp = extractFloatx80Exp(b);
    int bSign = extractFloatx80Sign(b);

    int zSign = aSign ^ bSign;

    if (bExp == 0x7FFF)
    {
        if ((Bit64u) (bSig<<1))
            return propagateFloatx80NaN(a, b, status);

        if (aExp == 0x7FFF) {
            if ((Bit64u) (aSig<<1))
                return propagateFloatx80NaN(a, b, status);

            if (aSign) {   /* return 3PI/4 */
                return roundAndPackFloatx80(80, bSign,
                        FLOATX80_3PI4_EXP, FLOAT_3PI4_HI, FLOAT_3PI4_LO, status);
            }
            else {         /* return  PI/4 */
                return roundAndPackFloatx80(80, bSign,
                        FLOATX80_PI4_EXP, FLOAT_PI_HI, FLOAT_PI_LO, status);
            }
        }

        if (aSig && (aExp == 0))
            float_raise(status, float_flag_denormal);

        /* return PI/2 */
        return roundAndPackFloatx80(80, bSign,
                FLOATX80_PI2_EXP, FLOAT_PI_HI, FLOAT_PI_LO, status);
    }
    if (aExp == 0x7FFF)
    {
        if ((Bit64u) (aSig<<1))
            return propagateFloatx80NaN(a, b, status);

        if (bSig && (bExp == 0))
            float_raise(status, float_flag_denormal);

return_PI_or_ZERO:

        if (aSign) {   /* return PI */
            return roundAndPackFloatx80(80, bSign,
                    FLOATX80_PI_EXP, FLOAT_PI_HI, FLOAT_PI_LO, status);
        } else {       /* return 0 */
            return packFloatx80(bSign, 0, 0);
        }
    }
    if (bExp == 0)
    {
        if (bSig == 0) {
            if (aSig && (aExp == 0)) float_raise(status, float_flag_denormal);
            goto return_PI_or_ZERO;
        }

        float_raise(status, float_flag_denormal);
        normalizeFloatx80Subnormal(bSig, &bExp, &bSig);
    }
    if (aExp == 0)
    {
        if (aSig == 0)   /* return PI/2 */
            return roundAndPackFloatx80(80, bSign,
                    FLOATX80_PI2_EXP, FLOAT_PI_HI, FLOAT_PI_LO, status);

        float_raise(status, float_flag_denormal);
        normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
    }

    float_raise(status, float_flag_inexact);

    /* |a| = |b| ==> return PI/4 */
    if (aSig == bSig && aExp == bExp)
        return roundAndPackFloatx80(80, bSign,
                FLOATX80_PI4_EXP, FLOAT_PI_HI, FLOAT_PI_LO, status);

    /* ******************************** */
    /* using float128 for approximation */
    /* ******************************** */

    float128 a128 = normalizeRoundAndPackFloat128(0, aExp-0x10, aSig, 0, status);
    float128 b128 = normalizeRoundAndPackFloat128(0, bExp-0x10, bSig, 0, status);
    float128 x;
    int swap = 0, add_pi6 = 0, add_pi4 = 0;

    if (aExp > bExp || (aExp == bExp && aSig > bSig))
    {
        x = float128_div(b128, a128, status);
    }
    else {
        x = float128_div(a128, b128, status);
        swap = 1;
    }

    Bit32s xExp = extractFloat128Exp(x);

    if (xExp <= EXP_BIAS-40)
        goto approximation_completed;

    if (x.hi >= BX_CONST64(0x3ffe800000000000))    // 3/4 < x < 1
    {
        /* arctan(x) = arctan((x-1)/(x+1)) + pi/4 */
        float128 t1 = float128_sub(x, float128_one, status);
        float128 t2 = float128_add(x, float128_one, status);
        x = float128_div(t1, t2, status);
        add_pi4 = 1;
    }
    else
    {
        /* argument correction */
        if (xExp >= 0x3FFD)                        // 1/4 < x < 3/4
        {
            /* arctan(x) = arctan((x*sqrt(3)-1)/(x+sqrt(3))) + pi/6 */
            float128 t1 = float128_mul(x, float128_sqrt3, status);
            float128 t2 = float128_add(x, float128_sqrt3, status);
            x = float128_sub(t1, float128_one, status);
            x = float128_div(x, t2, status);
            add_pi6 = 1;
        }
    }

    x = poly_atan(x, status);

    if (add_pi6) x = float128_add(x, float128_pi6, status);
    if (add_pi4) x = float128_add(x, float128_pi4, status);

approximation_completed:
    if (swap) x = float128_sub(float128_pi2, x, status);

    floatx80 result = float128_to_floatx80(x, status);
    if (zSign) floatx80_chs(result);

    int rSign = extractFloatx80Sign(result);
    if (!bSign && rSign)
        return floatx80_add(result, floatx80_pi, status);
    if (bSign && !rSign)
        return floatx80_sub(result, floatx80_pi, status);

    return result;
}
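/*
 * Reference-only sketch of what fpatan() computes: the x87 FPATAN result
 * atan2(b, a), i.e. arctan(b/a) placed in the correct quadrant.  The core
 * approximation above works on the smaller of |b/a| and |a/b| (swapping via
 * pi/2 - x when needed), applies the sign zSign, and the final two
 * floatx80_add/sub calls move the result by +/-pi whenever its sign disagrees
 * with the sign of b, which puts the answer in the correct half-plane when a
 * is negative.  Illustration only, not part of the library.
 */
#include <math.h>

static long double fpatan_reference(long double a, long double b)
{
    return atan2l(b, a);
}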