/* Self-checking test for __builtin_floorf: fill a source array, store
   the int-converted floor of every element, then re-verify each stored
   value against a fresh computation and abort on any mismatch.  */
TEST (void)
{
  float src[NUM];
  int res[NUM];
  int k;

  init_src (src);

  for (k = 0; k < NUM; k++)
    res[k] = (int) __builtin_floorf (src[k]);

  /* check results: */
  for (k = 0; k < NUM; k++)
    {
      if (res[k] != (int) __builtin_floorf (src[k]))
	abort ();
    }
}
/* Wrapper around the raw IEEE kernel __hide_ieee754_lgammaf_r:
   computes log|gamma(x)| and stores the sign of gamma(x) through
   SIGNGAMP, layering SVID/POSIX/X-Open error handling (errno,
   matherr) on top per the fdlibm _LIB_VERSION convention.  */
float __builtin_lgammaf_r(float x, int *signgamp) /* wrapper lgammaf_r */
{
#ifdef _IEEE_LIBM
	/* Pure IEEE mode: return the kernel result with no errno or
	   matherr processing.  */
	return __hide_ieee754_lgammaf_r(x,signgamp);
#else
	float y;
	struct exception exc;
	y = __hide_ieee754_lgammaf_r(x,signgamp);
	if(_LIB_VERSION == _IEEE_) return y;
	/* A finite argument yielding a non-finite result means either a
	   pole (x is a non-positive integer) or plain overflow.  */
	if(!__finitef(y)&&__finitef(x)) {
#ifndef HUGE_VAL
#define HUGE_VAL inf
	    /* No HUGE_VAL from the headers: build +infinity by hand.  */
	    double inf = 0.0;
	    SET_HIGH_WORD(inf,0x7ff00000); /* set inf to infinite */
#endif
	    /* Fill in the SVID exception record describing the fault.  */
	    exc.name = "lgammaf";
	    exc.err = 0;
	    exc.arg1 = exc.arg2 = (double)x;
	    /* SVID reports the legacy HUGE; others report HUGE_VAL.  */
	    if (_LIB_VERSION == _SVID_)
		exc.retval = HUGE;
	    else
		exc.retval = HUGE_VAL;
	    if(__builtin_floorf(x)==x&&x<=(float)0.0) {
		/* lgammaf(-integer) or lgamma(0) */
		exc.type = SING;
		if (_LIB_VERSION == _POSIX_)
		    errno = EDOM;
		else if (!__builtin_matherr(&exc)) {
		    /* User matherr did not handle it: set errno here.  */
		    errno = EDOM;
		}
	    } else {
		/* lgammaf(finite) overflow */
		exc.type = OVERFLOW;
		if (_LIB_VERSION == _POSIX_)
		    errno = ERANGE;
		else if (!__builtin_matherr(&exc)) {
		    errno = ERANGE;
		}
	    }
	    /* A matherr handler may have stored its own error code.  */
	    if (exc.err != 0)
		errno = exc.err;
	    return (float)exc.retval;
	} else
	    return y;
#endif
}
/* Fast approximation of 2**x for float x.  The argument is split into
   a rounded integer part, loaded directly into the exponent field of a
   float via FM_FLOAT_INIT_EXP, and a fractional remainder which is fed
   to a degree-6 polynomial (coefficients fm_exp2f_p[], evaluated by
   Horner's rule).  The product of the two pieces is the result.  */
float fm_exp2f(float x)
{
    ufi_t scale;
    float whole, frac, poly;
    int k;

    whole = __builtin_floorf(x + 0.5f);
    frac = x - whole;

    /* scale.f becomes 2**whole (exponent-field trick).  */
    FM_FLOAT_INIT_EXP(scale, whole);

    /* Horner evaluation, highest coefficient first — the same
       operation sequence as the unrolled form.  */
    poly = fm_exp2f_p[0];
    for (k = 1; k < 7; ++k)
        poly = poly * frac + fm_exp2f_p[k];

    return scale.f * poly;
}
/* Return X rounded downward to the nearest integral value,
   using the compiler's floorf builtin.  */
float
foo (float x)
{
  float rounded = __builtin_floorf (x);
  return rounded;
}
/*:::::*/
/* BASIC FIX() for single precision: truncate X toward zero,
   computed as floor(|x|) with the original sign restored via
   the runtime's SGN() helper.  */
FBCALL float fb_FIXSingle( float x )
{
	float magnitude = __builtin_fabsf( x );
	float truncated = __builtin_floorf( magnitude );
	return truncated * fb_SGNSingle( x );
}
// CHECK-LABEL: define void @test_float_builtin_ops void test_float_builtin_ops(float F, double D, long double LD) { volatile float resf; volatile double resd; volatile long double resld; resf = __builtin_fmodf(F,F); // CHECK: frem float resd = __builtin_fmod(D,D); // CHECK: frem double resld = __builtin_fmodl(LD,LD); // CHECK: frem x86_fp80 resf = __builtin_fabsf(F); resd = __builtin_fabs(D); resld = __builtin_fabsl(LD); // CHECK: call float @llvm.fabs.f32(float // CHECK: call double @llvm.fabs.f64(double // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80 resf = __builtin_canonicalizef(F); resd = __builtin_canonicalize(D); resld = __builtin_canonicalizel(LD); // CHECK: call float @llvm.canonicalize.f32(float // CHECK: call double @llvm.canonicalize.f64(double // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80 resf = __builtin_fminf(F, F); // CHECK: call float @llvm.minnum.f32 resd = __builtin_fmin(D, D); // CHECK: call double @llvm.minnum.f64 resld = __builtin_fminl(LD, LD); // CHECK: call x86_fp80 @llvm.minnum.f80 resf = __builtin_fmaxf(F, F); // CHECK: call float @llvm.maxnum.f32 resd = __builtin_fmax(D, D); // CHECK: call double @llvm.maxnum.f64 resld = __builtin_fmaxl(LD, LD); // CHECK: call x86_fp80 @llvm.maxnum.f80 resf = __builtin_fabsf(F); // CHECK: call float @llvm.fabs.f32 resd = __builtin_fabs(D); // CHECK: call double @llvm.fabs.f64 resld = __builtin_fabsl(LD); // CHECK: call x86_fp80 @llvm.fabs.f80 resf = __builtin_copysignf(F, F); // CHECK: call float @llvm.copysign.f32 resd = __builtin_copysign(D, D); // CHECK: call double @llvm.copysign.f64 resld = __builtin_copysignl(LD, LD); // CHECK: call x86_fp80 @llvm.copysign.f80 resf = __builtin_ceilf(F); // CHECK: call float @llvm.ceil.f32 resd = __builtin_ceil(D); // CHECK: call double @llvm.ceil.f64 resld = __builtin_ceill(LD); // CHECK: call x86_fp80 @llvm.ceil.f80 resf = __builtin_floorf(F); // CHECK: call float @llvm.floor.f32 resd = __builtin_floor(D); // CHECK: call double @llvm.floor.f64 resld = 
__builtin_floorl(LD); // CHECK: call x86_fp80 @llvm.floor.f80 resf = __builtin_truncf(F); // CHECK: call float @llvm.trunc.f32 resd = __builtin_trunc(D); // CHECK: call double @llvm.trunc.f64 resld = __builtin_truncl(LD); // CHECK: call x86_fp80 @llvm.trunc.f80 resf = __builtin_rintf(F); // CHECK: call float @llvm.rint.f32 resd = __builtin_rint(D); // CHECK: call double @llvm.rint.f64 resld = __builtin_rintl(LD); // CHECK: call x86_fp80 @llvm.rint.f80 resf = __builtin_nearbyintf(F); // CHECK: call float @llvm.nearbyint.f32 resd = __builtin_nearbyint(D); // CHECK: call double @llvm.nearbyint.f64 resld = __builtin_nearbyintl(LD); // CHECK: call x86_fp80 @llvm.nearbyint.f80 resf = __builtin_roundf(F); // CHECK: call float @llvm.round.f32 resd = __builtin_round(D); // CHECK: call double @llvm.round.f64 resld = __builtin_roundl(LD); // CHECK: call x86_fp80 @llvm.round.f80 }