// Conjugate of a packed 4-double value: lane 0 is kept, lanes 1-3 have their
// signs flipped (XOR with -0.0 toggles only the IEEE-754 sign bit).
// NOTE(review): presumably a quaternion conjugate (w, x, y, z) — confirm with callers.
inline float64x4_t conjugate(const float64x4_t ymm) {
    const float64x4_t flip_last_three = _mm256_setr_pd(+0.0, -0.0, -0.0, -0.0);
    return _mm256_xor_pd(ymm, flip_last_three);
}
__SIMDd _SIMD_neg_pd(__SIMDd a) { #ifdef USE_SSE return _mm_xor_pd(a, _mm_set1_pd(-0.0f)); #elif defined USE_AVX return _mm256_xor_pd(a, _mm_set1_pd(-0.0f)); #elif defined USE_IBM return vec_neg(a); #endif }
/*!
 * \brief Compute the negative of each element in the given vector
 * \return a vector containing the negative of each input element
 */
ETL_STATIC_INLINE(avx_simd_double) minus(avx_simd_double x) {
    // XOR-ing with -0.0 toggles the IEEE-754 sign bit of every lane,
    // which negates each double without touching mantissa or exponent.
    const __m256d sign_bits = _mm256_set1_pd(-0.);
    return _mm256_xor_pd(x.value, sign_bits);
}
// Bitwise NOT: flip all 256 bits of rhs by XOR-ing with an all-ones mask
// (an epi32 vector of -1 reinterpreted as packed doubles).
inline vector4d operator~(const vector4d& rhs) {
    const __m256d all_ones = _mm256_castsi256_pd(_mm256_set1_epi32(-1));
    return _mm256_xor_pd(rhs, all_ones);
}
// Lane-wise bitwise XOR of two packed-double vectors.
inline vector4d operator^(const vector4d& lhs, const vector4d& rhs) {
    vector4d result = _mm256_xor_pd(lhs, rhs);
    return result;
}
// Unary minus: negate each lane of o by toggling its IEEE-754 sign bit
// (XOR with a vector of -0.0; XOR is commutative, so operand order is free).
BI_FORCE_INLINE inline const avx_double operator-(const avx_double& o) {
    avx_double res;
    res.packed = _mm256_xor_pd(o.packed, _mm256_set1_pd(-0.0));
    return res;
}