// Clang CodeGen test: _mm_cmpge_pd must lower to the SSE2 cmp.pd intrinsic
// with predicate 2 (LE).  The ASM check expects cmplepd because SSE2 has no
// native packed-double "greater-or-equal"; the compiler emits the LE compare
// with the operands swapped.  The // DAG / // ASM lines below are FileCheck
// directives and must not be edited.
__m128d test_mm_cmpge_pd(__m128d A, __m128d B) {
  // DAG-LABEL: test_mm_cmpge_pd
  // DAG: call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 2)
  //
  // ASM-LABEL: test_mm_cmpge_pd
  // ASM: cmplepd
  return _mm_cmpge_pd(A, B);
}
/*
 * _SIMD_cmpge_pd: lane-wise a >= b comparison for the configured SIMD
 * backend (SSE, AVX, or IBM/Altivec, selected by preprocessor macro).
 * The result is heap-allocated and ownership is returned through
 * *resultPtr; the caller must free() it.
 *
 * Fixes:
 *  - The AVX branch called the non-existent _mm256_cmp(); the correct
 *    intrinsic is _mm256_cmp_pd().  Predicate 29 == _CMP_GE_OQ
 *    (ordered, non-signaling greater-or-equal).
 *  - A failed malloc() is now reported as *resultPtr == NULL instead of
 *    being dereferenced.
 */
void _SIMD_cmpge_pd(__SIMDd a, __SIMDd b, void** resultPtr)
{
    __SIMDd* result = (__SIMDd*)malloc(sizeof(__SIMDd));
    *resultPtr = result;
    if (result == NULL)
        return; /* out of memory: caller observes NULL and must handle it */
#ifdef USE_SSE
    *result = _mm_cmpge_pd(a, b);
#elif defined USE_AVX
    *result = _mm256_cmp_pd(a, b, 29); /* 29 == _CMP_GE_OQ */
#elif defined USE_IBM
    *result = vec_cmpge(a, b);
#endif
}
// Lane-wise ordered >= comparison of two packed-double values; each result
// lane is all-ones when lhs >= rhs in that lane, all-zeros otherwise.
BI_FORCE_INLINE inline sse_double operator>=(const sse_double& lhs, const sse_double& rhs)
{
    sse_double cmp;
    cmp.packed = _mm_cmpge_pd(lhs.packed, rhs.packed);
    return cmp;
}
{
  // NT2 functor dispatch: specialization of the is_greater_equal_ functor
  // for SSE SIMD packs of arithmetic scalars.  The implementation is chosen
  // by the pack's scalar type via the NT2_FUNCTOR_CALL_* macros below.
  // NOTE(review): macro semantics come from NT2 headers not visible here.
  template<class Info> struct call<is_greater_equal_,tag::simd_(tag::arithmetic_,tag::sse_),Info>
  {
    template<class Sig> struct result;
    // Result of calling with (A,A) is A itself (references/cv stripped).
    template<class This,class A> struct result<This(A,A)> : meta::strip<A> {};

    // Dispatch a 2-argument call on scalar_of<A0>: double, float, or any
    // other arithmetic type falls through to the generic branch.
    NT2_FUNCTOR_CALL_DISPATCH( 2 , typename nt2::meta::scalar_of<A0>::type , (3,(double,float,arithmetic_)) )

    // double lanes: native SSE2 packed-double >= compare.
    NT2_FUNCTOR_CALL_EVAL_IF(2,double)
    {
      A0 that = { _mm_cmpge_pd(a0,a1) };
      return that;
    }
    // float lanes: native SSE packed-single >= compare.
    NT2_FUNCTOR_CALL_EVAL_IF(2,float )
    {
      A0 that = { _mm_cmpge_ps(a0,a1) };
      return that;
    }
    // other arithmetic lanes: no native >= instruction, so compute
    // the complement of the < comparison: a0 >= a1  <=>  !(a0 < a1).
    NT2_FUNCTOR_CALL_EVAL_IF(2,arithmetic_)
    {
      A0 that = { complement(is_less(a0,a1) ) };
      return that;
    }
  };
// Clang CodeGen test: verifies that _mm_cmpge_pd lowers to the SSE2 cmp.pd
// intrinsic with immediate predicate 2 (LE — SSE2 has no direct GE
// packed-double compare, so GE is expressed as LE with swapped operands).
// The // CHECK lines are FileCheck directives and must not be edited.
__m128d test_mm_cmpge_pd(__m128d __a, __m128d __b) {
  // CHECK-LABEL: @test_mm_cmpge_pd
  // CHECK: @llvm.x86.sse2.cmp.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 2)
  return _mm_cmpge_pd(__a, __b);
}