float128_t softfloat_normRoundPackToF128( bool sign, int_fast32_t exp, uint_fast64_t sig64, uint_fast64_t sig0 ) { int_fast8_t shiftDist; struct uint128 sig128; union ui128_f128 uZ; uint_fast64_t sigExtra; struct uint128_extra sig128Extra; if ( ! sig64 ) { exp -= 64; sig64 = sig0; sig0 = 0; } shiftDist = softfloat_countLeadingZeros64( sig64 ) - 15; exp -= shiftDist; if ( 0 <= shiftDist ) { if ( shiftDist ) { sig128 = softfloat_shortShiftLeft128( sig64, sig0, shiftDist ); sig64 = sig128.v64; sig0 = sig128.v0; } if ( (uint32_t) exp < 0x7FFD ) { uZ.ui.v64 = packToF128UI64( sign, sig64 | sig0 ? exp : 0, sig64 ); uZ.ui.v0 = sig0; return uZ.f; } sigExtra = 0; } else { sig128Extra = softfloat_shortShiftRightJam128Extra( sig64, sig0, 0, -shiftDist ); sig64 = sig128Extra.v.v64; sig0 = sig128Extra.v.v0; sigExtra = sig128Extra.extra; } return softfloat_roundPackToF128( sign, exp, sig64, sig0, sigExtra ); }
float128_t f128_mul( float128_t a, float128_t b ) { union ui128_f128 uA; uint_fast64_t uiA64, uiA0; bool signA; int_fast32_t expA; struct uint128 sigA; union ui128_f128 uB; uint_fast64_t uiB64, uiB0; bool signB; int_fast32_t expB; struct uint128 sigB; bool signZ; uint_fast64_t magBits; struct exp32_sig128 normExpSig; int_fast32_t expZ; uint64_t sig256Z[4]; uint_fast64_t sigZExtra; struct uint128 sigZ; struct uint128_extra sig128Extra; struct uint128 uiZ; union ui128_f128 uZ; /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ uA.f = a; uiA64 = uA.ui.v64; uiA0 = uA.ui.v0; signA = signF128UI64( uiA64 ); expA = expF128UI64( uiA64 ); sigA.v64 = fracF128UI64( uiA64 ); sigA.v0 = uiA0; uB.f = b; uiB64 = uB.ui.v64; uiB0 = uB.ui.v0; signB = signF128UI64( uiB64 ); expB = expF128UI64( uiB64 ); sigB.v64 = fracF128UI64( uiB64 ); sigB.v0 = uiB0; signZ = signA ^ signB; /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ if ( expA == 0x7FFF ) { if ( (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0)) ) { goto propagateNaN; } magBits = expB | sigB.v64 | sigB.v0; goto infArg; } if ( expB == 0x7FFF ) { if ( sigB.v64 | sigB.v0 ) goto propagateNaN; magBits = expA | sigA.v64 | sigA.v0; goto infArg; } /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ if ( ! expA ) { if ( ! (sigA.v64 | sigA.v0) ) goto zero; normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); expA = normExpSig.exp; sigA = normExpSig.sig; } if ( ! expB ) { if ( ! (sigB.v64 | sigB.v0) ) goto zero; normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 ); expB = normExpSig.exp; sigB = normExpSig.sig; } /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ expZ = expA + expB - 0x4000; sigA.v64 |= UINT64_C( 0x0001000000000000 ); sigB = softfloat_shortShiftLeft128( sigB.v64, sigB.v0, 16 ); softfloat_mul128To256M( sigA.v64, sigA.v0, sigB.v64, sigB.v0, sig256Z ); sigZExtra = sig256Z[indexWord( 4, 1 )] | (sig256Z[indexWord( 4, 0 )] != 0); sigZ = softfloat_add128( sig256Z[indexWord( 4, 3 )], sig256Z[indexWord( 4, 2 )], sigA.v64, sigA.v0 ); if ( UINT64_C( 0x0002000000000000 ) <= sigZ.v64 ) { ++expZ; sig128Extra = softfloat_shortShiftRightJam128Extra( sigZ.v64, sigZ.v0, sigZExtra, 1 ); sigZ = sig128Extra.v; sigZExtra = sig128Extra.extra; } return softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra ); /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ propagateNaN: uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); goto uiZ; /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ infArg: if ( ! magBits ) { softfloat_raiseFlags( softfloat_flag_invalid ); uiZ.v64 = defaultNaNF128UI64; uiZ.v0 = defaultNaNF128UI0; goto uiZ; } uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); goto uiZ0; /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ zero: uiZ.v64 = packToF128UI64( signZ, 0, 0 ); uiZ0: uiZ.v0 = 0; uiZ: uZ.ui = uiZ; return uZ.f; }
float128_t softfloat_addMagsF128( uint_fast64_t uiA64, uint_fast64_t uiA0, uint_fast64_t uiB64, uint_fast64_t uiB0, bool signZ ) { int_fast32_t expA; struct uint128 sigA; int_fast32_t expB; struct uint128 sigB; int_fast32_t expDiff; struct uint128 uiZ, sigZ; int_fast32_t expZ; uint_fast64_t sigZExtra; struct uint128_extra sig128Extra; union ui128_f128 uZ; expA = expF128UI64( uiA64 ); sigA.v64 = fracF128UI64( uiA64 ); sigA.v0 = uiA0; expB = expF128UI64( uiB64 ); sigB.v64 = fracF128UI64( uiB64 ); sigB.v0 = uiB0; expDiff = expA - expB; if ( ! expDiff ) { if ( expA == 0x7FFF ) { if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN; uiZ.v64 = uiA64; uiZ.v0 = uiA0; goto uiZ; } sigZ = softfloat_add128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 ); if ( ! expA ) { uiZ.v64 = packToF128UI64( signZ, 0, sigZ.v64 ); uiZ.v0 = sigZ.v0; goto uiZ; } expZ = expA; sigZ.v64 |= UINT64_C( 0x0002000000000000 ); sigZExtra = 0; goto shiftRight1; } if ( expDiff < 0 ) { if ( expB == 0x7FFF ) { if ( sigB.v64 | sigB.v0 ) goto propagateNaN; uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); uiZ.v0 = 0; goto uiZ; } expZ = expB; if ( expA ) { sigA.v64 |= UINT64_C( 0x0001000000000000 ); } else { ++expDiff; sigZExtra = 0; if ( ! expDiff ) goto newlyAligned; } sig128Extra = softfloat_shiftRightJam128Extra( sigA.v64, sigA.v0, 0, -expDiff ); sigA = sig128Extra.v; sigZExtra = sig128Extra.extra; } else { if ( expA == 0x7FFF ) { if ( sigA.v64 | sigA.v0 ) goto propagateNaN; uiZ.v64 = uiA64; uiZ.v0 = uiA0; goto uiZ; } expZ = expA; if ( expB ) { sigB.v64 |= UINT64_C( 0x0001000000000000 ); } else { --expDiff; sigZExtra = 0; if ( ! expDiff ) goto newlyAligned; } sig128Extra = softfloat_shiftRightJam128Extra( sigB.v64, sigB.v0, 0, expDiff ); sigB = sig128Extra.v; sigZExtra = sig128Extra.extra; } newlyAligned: sigZ = softfloat_add128( sigA.v64 | UINT64_C( 0x0001000000000000 ), sigA.v0, sigB.v64, sigB.v0 ); --expZ; if ( sigZ.v64 < UINT64_C( 0x0002000000000000 ) ) goto roundAndPack; ++expZ; shiftRight1: sig128Extra = softfloat_shortShiftRightJam128Extra( sigZ.v64, sigZ.v0, sigZExtra, 1 ); sigZ = sig128Extra.v; sigZExtra = sig128Extra.extra; roundAndPack: return softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra ); propagateNaN: uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); uiZ: uZ.ui = uiZ; return uZ.f; }