/* Compute the imaginary part of the complex product (a + b*i) * (c + d*i),
   i.e. a*d + b*c, with infinity-recovery fixups resembling the C99 Annex G
   (__muldc3-style) algorithm.
   Each __builtin_expect cascade below is a macro-expanded infinity test:
   "x is not NaN  AND  (x - x) is NaN" holds exactly when x is +/-infinity
   (finite x gives x - x == 0; NaN x fails the first half).  */
float foo (float a, float b, float c, float d)
{
  float ac, bd, ad, bc, y;
  ac = a * c;
  bd = b * d;
  ad = a * d;
  bc = b * c;
  /* If a or b is infinite, project a onto +/-1 (infinite) or +/-0 (otherwise),
     keeping its sign — the standard "recover infinities" step.  */
  if (__builtin_expect (!__builtin_expect ((a) != (a), 0) & !__builtin_expect (!__builtin_expect (((a) - (a)) != ((a) - (a)), 0), 1), 0)
      || __builtin_expect (!__builtin_expect ((b) != (b), 0) & !__builtin_expect (!__builtin_expect (((b) - (b)) != ((b) - (b)), 0), 1), 0))
    a = __builtin_copysignf (__builtin_expect (!__builtin_expect ((a) != (a), 0) & !__builtin_expect (!__builtin_expect (((a) - (a)) != ((a) - (a)), 0), 1), 0) ? 1 : 0, a);
  /* NOTE(review): this assignment is NOT part of the if above (the if body is
     the single `a = ...` statement), so c is unconditionally rewritten to
     +/-1 or +/-0 here.  That looks like a reduced/mutated testcase rather
     than the full Annex G algorithm — confirm against the original test.  */
  c = __builtin_copysignf (__builtin_expect (!__builtin_expect ((c) != (c), 0) & !__builtin_expect (!__builtin_expect (((c) - (c)) != ((c) - (c)), 0), 1), 0) ? 1 : 0, c);
  /* If any of the partial products ac, bd, bc overflowed to infinity,
     squash d to a signed zero.  (ad is not checked here — presumably part of
     the testcase reduction; verify against the original.)  */
  if ((__builtin_expect (!__builtin_expect ((ac) != (ac), 0) & !__builtin_expect (!__builtin_expect (((ac) - (ac)) != ((ac) - (ac)), 0), 1), 0)
       || __builtin_expect (!__builtin_expect ((bd) != (bd), 0) & !__builtin_expect (!__builtin_expect (((bd) - (bd)) != ((bd) - (bd)), 0), 1), 0)
       || __builtin_expect (!__builtin_expect ((bc) != (bc), 0) & !__builtin_expect (!__builtin_expect (((bc) - (bc)) != ((bc) - (bc)), 0), 1), 0)))
    d = __builtin_copysignf (0, d);
  /* Imaginary part, recomputed from the (possibly fixed-up) operands.  */
  y = a * d + b * c;
  return y;
}
/* Testsuite driver: passes iff foo's result has a non-negative sign bit
   (copysign(1.0, result) == 1.0), i.e. foo returned +0.0 rather than -0.0.  */
int main ()
{
  /* When compiling standard compliant we expect foo to return -0.0.  But the
     variable expansion during unrolling optimization (for this testcase
     enabled by non-compliant -fassociative-math) instantiates copy(s) of the
     accumulator which it initializes with +0.0.  Hence we expect that foo
     returns +0.0.  */
  /* NOTE(review): this fragment comes from a different testsuite file than
     the 4-argument foo above — here foo is called with only two arguments
     (0.0 / -5.0 and 10), and abort/exit are used without declarations.
     These lines cannot compile as a single translation unit as-is; confirm
     the original file boundaries before building.  */
  if (__builtin_copysignf (1.0, foo (0.0 / -5.0, 10)) != 1.0)
    abort ();
  exit (0);
}
/* Apply __builtin_copysignf with a spread of magnitude arguments (+/-1,
   +/-0, +infinity, -NaN) to the external inputs Yf[] and compare each
   result bit-for-bit against the expected values in Zf[]; abort on any
   mismatch.  Bitwise comparison (memcmp) is used so signed zeros and NaN
   payloads are checked, not just numeric equality.  */
void testf (void)
{
  float got[8];

  got[0] = __builtin_copysignf (1.0, Yf[0]);
  got[1] = __builtin_copysignf (1.0, Yf[1]);
  got[2] = __builtin_copysignf (-1.0, Yf[2]);
  got[3] = __builtin_copysignf (0.0, Yf[3]);
  got[4] = __builtin_copysignf (-0.0, Yf[4]);
  got[5] = __builtin_copysignf (-0.0, Yf[5]);
  got[6] = __builtin_copysignf (__builtin_inff (), Yf[6]);
  got[7] = __builtin_copysignf (-__builtin_nanf (""), Yf[7]);

  for (int i = 0; i < 8; ++i)
    if (__builtin_memcmp (&got[i], &Zf[i], sizeof (float)) != 0)
      abort ();
}
// APFloat doesn't have signalling NaN functions. //double g9 = __builtin_nans(""); //float g10 = __builtin_nansf(""); //long double g11 = __builtin_nansl(""); //int g12 = __builtin_abs(-12); double g13 = __builtin_fabs(-12.); double g13_0 = __builtin_fabs(-0.); double g13_1 = __builtin_fabs(-__builtin_inf()); float g14 = __builtin_fabsf(-12.f); // GCC doesn't eat this one. //long double g15 = __builtin_fabsfl(-12.0L); float g16 = __builtin_copysign(1.0, -1.0); double g17 = __builtin_copysignf(1.0f, -1.0f); long double g18 = __builtin_copysignl(1.0L, -1.0L); char classify_nan [__builtin_fpclassify(+1, -1, -1, -1, -1, __builtin_nan(""))]; char classify_snan [__builtin_fpclassify(+1, -1, -1, -1, -1, __builtin_nans(""))]; char classify_inf [__builtin_fpclassify(-1, +1, -1, -1, -1, __builtin_inf())]; char classify_neg_inf [__builtin_fpclassify(-1, +1, -1, -1, -1, -__builtin_inf())]; char classify_normal [__builtin_fpclassify(-1, -1, +1, -1, -1, 1.539)]; char classify_normal2 [__builtin_fpclassify(-1, -1, +1, -1, -1, 1e-307)]; char classify_denorm [__builtin_fpclassify(-1, -1, -1, +1, -1, 1e-308)]; char classify_denorm2 [__builtin_fpclassify(-1, -1, -1, +1, -1, -1e-308)]; char classify_zero [__builtin_fpclassify(-1, -1, -1, -1, +1, 0.0)]; char classify_neg_zero[__builtin_fpclassify(-1, -1, -1, -1, +1, -0.0)]; char isinf_sign_noninf1[__builtin_isinf_sign(-0.0) == 0 ? 1 : -1]; char isinf_sign_noninf2[__builtin_isinf_sign(1e307) == 0 ? 1 : -1];
/* Force x's sign bit negative: multiplying x by a unit value whose sign is
   the opposite of x's yields -|x| (for x > 0 the factor is -1, for x < 0 it
   is +1; a +0.0f input becomes -0.0f).  This is the "xorsign" shape that
   compilers can lower to a plain sign-bit OR.  */
float f1(float x)
{
  return x * __builtin_copysignf (1.0f, -x);
}
// Thin wrapper exposing the compiler's copysign builtin under a library name,
// usable in constant expressions.  SPROUT_CONSTEXPR is a project macro
// (presumably expanding to `constexpr` or nothing depending on compiler
// support — confirm in Sprout's config headers).
inline SPROUT_CONSTEXPR float builtin_copysign(float x, float y) { return __builtin_copysignf(x, y); }
// CHECK-LABEL: define void @test_float_builtin_ops void test_float_builtin_ops(float F, double D, long double LD) { volatile float resf; volatile double resd; volatile long double resld; resf = __builtin_fmodf(F,F); // CHECK: frem float resd = __builtin_fmod(D,D); // CHECK: frem double resld = __builtin_fmodl(LD,LD); // CHECK: frem x86_fp80 resf = __builtin_fabsf(F); resd = __builtin_fabs(D); resld = __builtin_fabsl(LD); // CHECK: call float @llvm.fabs.f32(float // CHECK: call double @llvm.fabs.f64(double // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80 resf = __builtin_canonicalizef(F); resd = __builtin_canonicalize(D); resld = __builtin_canonicalizel(LD); // CHECK: call float @llvm.canonicalize.f32(float // CHECK: call double @llvm.canonicalize.f64(double // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80 resf = __builtin_fminf(F, F); // CHECK: call float @llvm.minnum.f32 resd = __builtin_fmin(D, D); // CHECK: call double @llvm.minnum.f64 resld = __builtin_fminl(LD, LD); // CHECK: call x86_fp80 @llvm.minnum.f80 resf = __builtin_fmaxf(F, F); // CHECK: call float @llvm.maxnum.f32 resd = __builtin_fmax(D, D); // CHECK: call double @llvm.maxnum.f64 resld = __builtin_fmaxl(LD, LD); // CHECK: call x86_fp80 @llvm.maxnum.f80 resf = __builtin_fabsf(F); // CHECK: call float @llvm.fabs.f32 resd = __builtin_fabs(D); // CHECK: call double @llvm.fabs.f64 resld = __builtin_fabsl(LD); // CHECK: call x86_fp80 @llvm.fabs.f80 resf = __builtin_copysignf(F, F); // CHECK: call float @llvm.copysign.f32 resd = __builtin_copysign(D, D); // CHECK: call double @llvm.copysign.f64 resld = __builtin_copysignl(LD, LD); // CHECK: call x86_fp80 @llvm.copysign.f80 resf = __builtin_ceilf(F); // CHECK: call float @llvm.ceil.f32 resd = __builtin_ceil(D); // CHECK: call double @llvm.ceil.f64 resld = __builtin_ceill(LD); // CHECK: call x86_fp80 @llvm.ceil.f80 resf = __builtin_floorf(F); // CHECK: call float @llvm.floor.f32 resd = __builtin_floor(D); // CHECK: call double @llvm.floor.f64 resld = 
__builtin_floorl(LD); // CHECK: call x86_fp80 @llvm.floor.f80 resf = __builtin_truncf(F); // CHECK: call float @llvm.trunc.f32 resd = __builtin_trunc(D); // CHECK: call double @llvm.trunc.f64 resld = __builtin_truncl(LD); // CHECK: call x86_fp80 @llvm.trunc.f80 resf = __builtin_rintf(F); // CHECK: call float @llvm.rint.f32 resd = __builtin_rint(D); // CHECK: call double @llvm.rint.f64 resld = __builtin_rintl(LD); // CHECK: call x86_fp80 @llvm.rint.f80 resf = __builtin_nearbyintf(F); // CHECK: call float @llvm.nearbyint.f32 resd = __builtin_nearbyint(D); // CHECK: call double @llvm.nearbyint.f64 resld = __builtin_nearbyintl(LD); // CHECK: call x86_fp80 @llvm.nearbyint.f80 resf = __builtin_roundf(F); // CHECK: call float @llvm.round.f32 resd = __builtin_round(D); // CHECK: call double @llvm.round.f64 resld = __builtin_roundl(LD); // CHECK: call x86_fp80 @llvm.round.f80 }
/* libm-internal alias for copysignf: return x with the sign of y, delegating
   to the compiler builtin so it can fold to a sign-bit transfer.  */
float
__copysignf (float x, float y)
{
  float result = __builtin_copysignf (x, y);
  return result;
}