void invSqrt32f(const float* src, float* dst, int len)
{
    CV_INSTRUMENT_REGION();

    int i = 0;

#if CV_SIMD
    const int VECSZ = v_float32::nlanes;
    for( ; i < len; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > len )
        {
            if( i == 0 || src == dst )
                break;
            i = len - VECSZ*2;
        }
        v_float32 t0 = vx_load(src + i), t1 = vx_load(src + i + VECSZ);
        t0 = v_invsqrt(t0);
        t1 = v_invsqrt(t1);
        v_store(dst + i, t0);
        v_store(dst + i + VECSZ, t1);
    }
    vx_cleanup();
#endif

    for( ; i < len; i++ )
        dst[i] = 1/std::sqrt(src[i]);
}
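// The kernels above and below share a tail-handling pattern: when fewer than a
// full vector's worth of elements remains and the operation is not in-place,
// the index steps back to len - VECSZ (or len - VECSZ*2) so that the last
// iteration reprocesses a few already-computed elements instead of falling
// through to the scalar loop. A minimal sketch of that pattern, assuming a
// trivial copy kernel and OpenCV's universal intrinsics (copy32f_sketch is a
// made-up name, not part of the library):
#include "opencv2/core/hal/intrin.hpp"

void copy32f_sketch(const float* src, float* dst, int len)
{
    using namespace cv;
    int i = 0;
#if CV_SIMD
    const int VECSZ = v_float32::nlanes;
    for( ; i < len; i += VECSZ )
    {
        if( i + VECSZ > len )
        {
            // not enough elements left: fall back to the scalar loop if the
            // buffers alias (or the array is tiny), otherwise overlap the tail
            if( i == 0 || src == dst )
                break;
            i = len - VECSZ;
        }
        v_store(dst + i, vx_load(src + i));
    }
    vx_cleanup();
#endif
    for( ; i < len; i++ )
        dst[i] = src[i];
}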
void sqrt64f(const double* src, double* dst, int len)
{
    CV_INSTRUMENT_REGION();

    int i = 0;

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    for( ; i < len; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > len )
        {
            if( i == 0 || src == dst )
                break;
            i = len - VECSZ*2;
        }
        v_float64 t0 = vx_load(src + i), t1 = vx_load(src + i + VECSZ);
        t0 = v_sqrt(t0);
        t1 = v_sqrt(t1);
        v_store(dst + i, t0);
        v_store(dst + i + VECSZ, t1);
    }
    vx_cleanup();
#endif

    for( ; i < len; i++ )
        dst[i] = std::sqrt(src[i]);
}
void magnitude64f(const double* x, const double* y, double* mag, int len)
{
    CV_INSTRUMENT_REGION();

    int i = 0;

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    for( ; i < len; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > len )
        {
            if( i == 0 || mag == x || mag == y )
                break;
            i = len - VECSZ*2;
        }
        v_float64 x0 = vx_load(x + i), x1 = vx_load(x + i + VECSZ);
        v_float64 y0 = vx_load(y + i), y1 = vx_load(y + i + VECSZ);
        x0 = v_sqrt(v_muladd(x0, x0, y0*y0));
        x1 = v_sqrt(v_muladd(x1, x1, y1*y1));
        v_store(mag + i, x0);
        v_store(mag + i + VECSZ, x1);
    }
    vx_cleanup();
#endif

    for( ; i < len; i++ )
    {
        double x0 = x[i], y0 = y[i];
        mag[i] = std::sqrt(x0*x0 + y0*y0);
    }
}
int normL1_(const uchar* a, const uchar* b, int n)
{
    int j = 0, d = 0;
#if CV_SIMD
    for (; j <= n - v_uint8::nlanes; j += v_uint8::nlanes)
        d += v_reduce_sad(vx_load(a + j), vx_load(b + j));
#endif
    for( ; j < n; j++ )
        d += std::abs(a[j] - b[j]);
    return d;
}
float normL1_(const float* a, const float* b, int n)
{
    int j = 0;
    float d = 0.f;
#if CV_SIMD
    v_float32 v_d = vx_setzero_f32();
    for (; j <= n - v_float32::nlanes; j += v_float32::nlanes)
        v_d += v_absdiff(vx_load(a + j), vx_load(b + j));
    d = v_reduce_sum(v_d);
#endif
    for( ; j < n; j++ )
        d += std::abs(a[j] - b[j]);
    return d;
}
int operator () (const short * src0, const uchar * mask, int * dst, int len, int cn) const
{
    if (mask || (cn != 1 && cn != 2 && cn != 4))
        return 0;
    len *= cn;

    int x = 0;
    v_int32 v_sum = vx_setzero_s32();

    for (; x <= len - v_int16::nlanes; x += v_int16::nlanes)
    {
        // widen the 16-bit input lanes to 32 bits before accumulating
        v_int32 v_src0, v_src1;
        v_expand(vx_load(src0 + x), v_src0, v_src1);
        v_sum += v_src0 + v_src1;
    }
    if (x <= len - v_int32::nlanes)
    {
        v_sum += vx_load_expand(src0 + x);
        x += v_int32::nlanes;
    }

    if (cn == 1)
        *dst += v_reduce_sum(v_sum);
    else
    {
        // scatter the per-lane partial sums into the per-channel accumulators
        int32_t CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[v_int32::nlanes];
        v_store_aligned(ar, v_sum);
        for (int i = 0; i < v_int32::nlanes; ++i)
            dst[i % cn] += ar[i];
    }
    v_cleanup();
    return x / cn;
}
float normL2Sqr_(const float* a, const float* b, int n)
{
    int j = 0;
    float d = 0.f;
#if CV_SIMD
    v_float32 v_d = vx_setzero_f32();
    for (; j <= n - v_float32::nlanes; j += v_float32::nlanes)
    {
        v_float32 t = vx_load(a + j) - vx_load(b + j);
        v_d = v_muladd(t, t, v_d);
    }
    d = v_reduce_sum(v_d);
#endif
    for( ; j < n; j++ )
    {
        float t = a[j] - b[j];
        d += t*t;
    }
    return d;
}
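// The normL1_/normL2Sqr_ helpers operate on raw arrays and are typically called
// from higher-level norm/distance code. A hypothetical standalone usage sketch;
// the input values below are made up for illustration:
void normL1_normL2Sqr_example()
{
    float a[8] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
    float b[8] = { 8.f, 7.f, 6.f, 5.f, 4.f, 3.f, 2.f, 1.f };
    float l1    = normL1_(a, b, 8);     // sum of |a[j] - b[j]|   -> 32
    float l2sqr = normL2Sqr_(a, b, 8);  // sum of (a[j] - b[j])^2 -> 168
    (void)l1; (void)l2sqr;
}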
int operator () (const schar * src0, const uchar * mask, int * dst, int len, int cn) const
{
    if (mask || (cn != 1 && cn != 2 && cn != 4))
        return 0;
    len *= cn;

    int x = 0;
    v_int32 v_sum = vx_setzero_s32();

    int len0 = len & -v_int8::nlanes;
    while (x < len0)
    {
        // accumulate blocks of at most 256*v_int16::nlanes int8 values in 16-bit
        // lanes (the partial sums cannot overflow int16), then widen the block
        // total into the 32-bit accumulator
        const int len_tmp = min(x + 256*v_int16::nlanes, len0);
        v_int16 v_sum16 = vx_setzero_s16();
        for (; x < len_tmp; x += v_int8::nlanes)
        {
            v_int16 v_src0, v_src1;
            v_expand(vx_load(src0 + x), v_src0, v_src1);
            v_sum16 += v_src0 + v_src1;
        }
        v_int32 v_half0, v_half1;
        v_expand(v_sum16, v_half0, v_half1);
        v_sum += v_half0 + v_half1;
    }
    if (x <= len - v_int16::nlanes)
    {
        v_int32 v_half0, v_half1;
        v_expand(vx_load_expand(src0 + x), v_half0, v_half1);
        v_sum += v_half0 + v_half1;
        x += v_int16::nlanes;
    }
    if (x <= len - v_int32::nlanes)
    {
        v_sum += vx_load_expand_q(src0 + x);
        x += v_int32::nlanes;
    }

    if (cn == 1)
        *dst += v_reduce_sum(v_sum);
    else
    {
        // scatter the per-lane partial sums into the per-channel accumulators
        int32_t CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[v_int32::nlanes];
        v_store_aligned(ar, v_sum);
        for (int i = 0; i < v_int32::nlanes; ++i)
            dst[i % cn] += ar[i];
    }
    v_cleanup();
    return x / cn;
}
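// Both accumulators above finish with the same lane-to-channel scatter: since cn
// is 1, 2 or 4 and every vector load covers a whole number of pixels, lane i of
// the vector sum always belongs to channel i % cn. A minimal scalar sketch of
// that final step (accumulate_channels is a made-up name for illustration):
#include <cstdint>

void accumulate_channels(const int32_t* lanes, int nlanes, int cn, int* dst)
{
    // works because nlanes is a multiple of cn, so lane i always maps to channel i % cn
    for (int i = 0; i < nlanes; ++i)
        dst[i % cn] += lanes[i];
}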
void log64f( const double *x, double *y, int n )
{
    CV_INSTRUMENT_REGION();

    const double* const logTab = cv::details::getLogTab64f();

    const int64 LOGTAB_MASK2_64F = ((int64)1 << (52 - LOGTAB_SCALE)) - 1;
    const double
        A7 = 1.0,
        A6 = -0.5,
        A5 = 0.333333333333333314829616256247390992939472198486328125,
        A4 = -0.25,
        A3 = 0.2,
        A2 = -0.1666666666666666574148081281236954964697360992431640625,
        A1 = 0.1428571428571428769682682968777953647077083587646484375,
        A0 = -0.125;

    int i = 0;

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    const v_float64 vln2 = vx_setall_f64(ln_2);
    const v_float64
        vA0 = vx_setall_f64(A0), vA1 = vx_setall_f64(A1),
        vA2 = vx_setall_f64(A2), vA3 = vx_setall_f64(A3),
        vA4 = vx_setall_f64(A4), vA5 = vx_setall_f64(A5),
        vA6 = vx_setall_f64(A6), vA7 = vx_setall_f64(A7);

    for( ; i < n; i += VECSZ )
    {
        if( i + VECSZ > n )
        {
            if( i == 0 || x == y )
                break;
            i = n - VECSZ;
        }

        v_int64 h0 = vx_load((const int64*)x + i);
        v_int32 yi0 = v_pack(v_shr<52>(h0), vx_setzero_s64());
        yi0 = (yi0 & vx_setall_s32(0x7ff)) - vx_setall_s32(1023);

        v_int64 xi0 = (h0 & vx_setall_s64(LOGTAB_MASK2_64F)) | vx_setall_s64((int64)1023 << 52);
        h0 = v_shr<52 - LOGTAB_SCALE - 1>(h0);
        v_int32 idx = v_pack(h0, h0) & vx_setall_s32(LOGTAB_MASK*2);

        v_float64 xf0, yf0;
        v_lut_deinterleave(logTab, idx, yf0, xf0);

        yf0 = v_fma(v_cvt_f64(yi0), vln2, yf0);
        v_float64 delta = v_cvt_f64(idx == vx_setall_s32(510))*vx_setall_f64(1./512);
        xf0 = v_fma(v_reinterpret_as_f64(xi0) - vx_setall_f64(1.), xf0, delta);

        v_float64 xq = xf0*xf0;
        v_float64 zf0 = v_fma(xq, vA0, vA2);
        v_float64 zf1 = v_fma(xq, vA1, vA3);
        zf0 = v_fma(zf0, xq, vA4);
        zf1 = v_fma(zf1, xq, vA5);
        zf0 = v_fma(zf0, xq, vA6);
        zf1 = v_fma(zf1, xq, vA7);
        zf1 = v_fma(zf1, xf0, yf0);
        zf0 = v_fma(zf0, xq, zf1);

        v_store(y + i, zf0);
    }
    vx_cleanup();
#endif

    for( ; i < n; i++ )
    {
        Cv64suf buf;
        int64 i0 = ((const int64*)x)[i];

        buf.i = (i0 & LOGTAB_MASK2_64F) | ((int64)1023 << 52);
        int idx = (int)(i0 >> (52 - LOGTAB_SCALE - 1)) & (LOGTAB_MASK*2);

        double y0 = (((int)(i0 >> 52) & 0x7ff) - 1023) * ln_2 + logTab[idx];
        double x0 = (buf.f - 1.)*logTab[idx + 1] + (idx == 510 ? -1./512 : 0.);

        double xq = x0*x0;
        y[i] = (((A0*xq + A2)*xq + A4)*xq + A6)*xq + (((A1*xq + A3)*xq + A5)*xq + A7)*x0 + y0;
    }
}
void log32f( const float *_x, float *y, int n )
{
    CV_INSTRUMENT_REGION();

    const float* const logTab_f = cv::details::getLogTab32f();

    const int LOGTAB_MASK2_32F = (1 << (23 - LOGTAB_SCALE)) - 1;
    const float
        A0 = 0.3333333333333333333333333f,
        A1 = -0.5f,
        A2 = 1.f;

    int i = 0;
    const int* x = (const int*)_x;

#if CV_SIMD
    const int VECSZ = v_float32::nlanes;
    const v_float32 vln2 = vx_setall_f32((float)ln_2);
    const v_float32 v1 = vx_setall_f32(1.f);
    const v_float32 vshift = vx_setall_f32(-1.f/512);

    const v_float32 vA0 = vx_setall_f32(A0);
    const v_float32 vA1 = vx_setall_f32(A1);
    const v_float32 vA2 = vx_setall_f32(A2);

    for( ; i < n; i += VECSZ )
    {
        if( i + VECSZ > n )
        {
            if( i == 0 || _x == y )
                break;
            i = n - VECSZ;
        }

        v_int32 h0 = vx_load(x + i);
        v_int32 yi0 = (v_shr<23>(h0) & vx_setall_s32(255)) - vx_setall_s32(127);
        v_int32 xi0 = (h0 & vx_setall_s32(LOGTAB_MASK2_32F)) | vx_setall_s32(127 << 23);

        h0 = v_shr<23 - LOGTAB_SCALE - 1>(h0) & vx_setall_s32(LOGTAB_MASK*2);
        v_float32 yf0, xf0;
        v_lut_deinterleave(logTab_f, h0, yf0, xf0);

        yf0 = v_fma(v_cvt_f32(yi0), vln2, yf0);

        v_float32 delta = v_reinterpret_as_f32(h0 == vx_setall_s32(510)) & vshift;
        xf0 = v_fma((v_reinterpret_as_f32(xi0) - v1), xf0, delta);

        v_float32 zf0 = v_fma(xf0, vA0, vA1);
        zf0 = v_fma(zf0, xf0, vA2);
        zf0 = v_fma(zf0, xf0, yf0);

        v_store(y + i, zf0);
    }
    vx_cleanup();
#endif

    for( ; i < n; i++ )
    {
        Cv32suf buf;
        int i0 = x[i];

        buf.i = (i0 & LOGTAB_MASK2_32F) | (127 << 23);
        int idx = (i0 >> (23 - LOGTAB_SCALE - 1)) & (LOGTAB_MASK*2);

        float y0 = (((i0 >> 23) & 0xff) - 127) * (float)ln_2 + logTab_f[idx];
        float x0 = (buf.f - 1.f)*logTab_f[idx + 1] + (idx == 510 ? -1.f/512 : 0.f);
        y[i] = ((A0*x0 + A1)*x0 + A2)*x0 + y0;
    }
}
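// Both log kernels rely on the same decomposition of the IEEE-754 layout: the
// exponent field contributes an integer multiple of ln(2), a small table indexed
// by the top LOGTAB_SCALE mantissa bits supplies the log of a nearby point, and
// a short polynomial corrects the remainder. A minimal scalar sketch of just the
// exponent/mantissa split, assuming std::frexp (an illustration of the idea, not
// the table-based kernel itself):
#include <cmath>

double log_decomposed(double v)
{
    int e;
    double m = std::frexp(v, &e);      // v = m * 2^e, with m in [0.5, 1)
    return e * 0.6931471805599453 + std::log(m);
}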
void exp64f( const double *_x, double *y, int n )
{
    CV_INSTRUMENT_REGION();

    const double* const expTab = cv::details::getExpTab64f();

    const double
        A5 = .99999999999999999998285227504999 / EXPPOLY_32F_A0,
        A4 = .69314718055994546743029643825322 / EXPPOLY_32F_A0,
        A3 = .24022650695886477918181338054308 / EXPPOLY_32F_A0,
        A2 = .55504108793649567998466049042729e-1 / EXPPOLY_32F_A0,
        A1 = .96180973140732918010002372686186e-2 / EXPPOLY_32F_A0,
        A0 = .13369713757180123244806654839424e-2 / EXPPOLY_32F_A0;

    int i = 0;
    const Cv64suf* x = (const Cv64suf*)_x;
    double minval = (-exp_max_val/exp_prescale);
    double maxval = (exp_max_val/exp_prescale);

#if CV_SIMD_64F
    const int VECSZ = v_float64::nlanes;
    const v_float64 vprescale = vx_setall_f64(exp_prescale);
    const v_float64 vpostscale = vx_setall_f64(exp_postscale);
    const v_float64 vminval = vx_setall_f64(minval);
    const v_float64 vmaxval = vx_setall_f64(maxval);

    const v_float64 vA1 = vx_setall_f64(A1);
    const v_float64 vA2 = vx_setall_f64(A2);
    const v_float64 vA3 = vx_setall_f64(A3);
    const v_float64 vA4 = vx_setall_f64(A4);
    const v_float64 vA5 = vx_setall_f64(A5);

    const v_int32 vidxmask = vx_setall_s32(EXPTAB_MASK);
    bool y_aligned = (size_t)(void*)y % 32 == 0;

    for( ; i < n; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > n )
        {
            if( i == 0 || _x == y )
                break;
            i = n - VECSZ*2;
            y_aligned = false;
        }

        v_float64 xf0 = vx_load(&x[i].f), xf1 = vx_load(&x[i + VECSZ].f);

        xf0 = v_min(v_max(xf0, vminval), vmaxval);
        xf1 = v_min(v_max(xf1, vminval), vmaxval);

        xf0 *= vprescale;
        xf1 *= vprescale;

        v_int32 xi0 = v_round(xf0);
        v_int32 xi1 = v_round(xf1);
        xf0 = (xf0 - v_cvt_f64(xi0))*vpostscale;
        xf1 = (xf1 - v_cvt_f64(xi1))*vpostscale;

        v_float64 yf0 = v_lut(expTab, xi0 & vidxmask);
        v_float64 yf1 = v_lut(expTab, xi1 & vidxmask);

        v_int32 v0 = vx_setzero_s32(), v1023 = vx_setall_s32(1023), v2047 = vx_setall_s32(2047);
        xi0 = v_min(v_max(v_shr<EXPTAB_SCALE>(xi0) + v1023, v0), v2047);
        xi1 = v_min(v_max(v_shr<EXPTAB_SCALE>(xi1) + v1023, v0), v2047);

        v_int64 xq0, xq1, dummy;
        v_expand(xi0, xq0, dummy);
        v_expand(xi1, xq1, dummy);

        yf0 *= v_reinterpret_as_f64(v_shl<52>(xq0));
        yf1 *= v_reinterpret_as_f64(v_shl<52>(xq1));

        v_float64 zf0 = xf0 + vA1;
        v_float64 zf1 = xf1 + vA1;

        zf0 = v_fma(zf0, xf0, vA2);
        zf1 = v_fma(zf1, xf1, vA2);

        zf0 = v_fma(zf0, xf0, vA3);
        zf1 = v_fma(zf1, xf1, vA3);

        zf0 = v_fma(zf0, xf0, vA4);
        zf1 = v_fma(zf1, xf1, vA4);

        zf0 = v_fma(zf0, xf0, vA5);
        zf1 = v_fma(zf1, xf1, vA5);

        zf0 *= yf0;
        zf1 *= yf1;

        if( y_aligned )
        {
            v_store_aligned(y + i, zf0);
            v_store_aligned(y + i + VECSZ, zf1);
        }
        else
        {
            v_store(y + i, zf0);
            v_store(y + i + VECSZ, zf1);
        }
    }
    vx_cleanup();
#endif

    for( ; i < n; i++ )
    {
        double x0 = x[i].f;
        x0 = std::min(std::max(x0, minval), maxval);
        x0 *= exp_prescale;
        Cv64suf buf;

        int xi = saturate_cast<int>(x0);
        x0 = (x0 - xi)*exp_postscale;

        int t = (xi >> EXPTAB_SCALE) + 1023;
        t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
        buf.i = (int64)t << 52;

        y[i] = buf.f * expTab[xi & EXPTAB_MASK] * (((((A0*x0 + A1)*x0 + A2)*x0 + A3)*x0 + A4)*x0 + A5);
    }
}
void exp32f( const float *_x, float *y, int n )
{
    CV_INSTRUMENT_REGION();

    const float* const expTab_f = cv::details::getExpTab32f();

    const float
        A4 = (float)(1.000000000000002438532970795181890933776 / EXPPOLY_32F_A0),
        A3 = (float)(.6931471805521448196800669615864773144641 / EXPPOLY_32F_A0),
        A2 = (float)(.2402265109513301490103372422686535526573 / EXPPOLY_32F_A0),
        A1 = (float)(.5550339366753125211915322047004666939128e-1 / EXPPOLY_32F_A0);

    int i = 0;
    const Cv32suf* x = (const Cv32suf*)_x;
    float minval = (float)(-exp_max_val/exp_prescale);
    float maxval = (float)(exp_max_val/exp_prescale);
    float postscale = (float)exp_postscale;

#if CV_SIMD
    const int VECSZ = v_float32::nlanes;
    const v_float32 vprescale = vx_setall_f32((float)exp_prescale);
    const v_float32 vpostscale = vx_setall_f32((float)exp_postscale);
    const v_float32 vminval = vx_setall_f32(minval);
    const v_float32 vmaxval = vx_setall_f32(maxval);

    const v_float32 vA1 = vx_setall_f32((float)A1);
    const v_float32 vA2 = vx_setall_f32((float)A2);
    const v_float32 vA3 = vx_setall_f32((float)A3);
    const v_float32 vA4 = vx_setall_f32((float)A4);

    const v_int32 vidxmask = vx_setall_s32(EXPTAB_MASK);
    bool y_aligned = (size_t)(void*)y % 32 == 0;

    for( ; i < n; i += VECSZ*2 )
    {
        if( i + VECSZ*2 > n )
        {
            if( i == 0 || _x == y )
                break;
            i = n - VECSZ*2;
            y_aligned = false;
        }

        v_float32 xf0 = vx_load(&x[i].f), xf1 = vx_load(&x[i + VECSZ].f);

        xf0 = v_min(v_max(xf0, vminval), vmaxval);
        xf1 = v_min(v_max(xf1, vminval), vmaxval);

        xf0 *= vprescale;
        xf1 *= vprescale;

        v_int32 xi0 = v_round(xf0);
        v_int32 xi1 = v_round(xf1);
        xf0 = (xf0 - v_cvt_f32(xi0))*vpostscale;
        xf1 = (xf1 - v_cvt_f32(xi1))*vpostscale;

        v_float32 yf0 = v_lut(expTab_f, xi0 & vidxmask);
        v_float32 yf1 = v_lut(expTab_f, xi1 & vidxmask);

        v_int32 v0 = vx_setzero_s32(), v127 = vx_setall_s32(127), v255 = vx_setall_s32(255);
        xi0 = v_min(v_max(v_shr<EXPTAB_SCALE>(xi0) + v127, v0), v255);
        xi1 = v_min(v_max(v_shr<EXPTAB_SCALE>(xi1) + v127, v0), v255);

        yf0 *= v_reinterpret_as_f32(v_shl<23>(xi0));
        yf1 *= v_reinterpret_as_f32(v_shl<23>(xi1));

        v_float32 zf0 = xf0 + vA1;
        v_float32 zf1 = xf1 + vA1;

        zf0 = v_fma(zf0, xf0, vA2);
        zf1 = v_fma(zf1, xf1, vA2);

        zf0 = v_fma(zf0, xf0, vA3);
        zf1 = v_fma(zf1, xf1, vA3);

        zf0 = v_fma(zf0, xf0, vA4);
        zf1 = v_fma(zf1, xf1, vA4);

        zf0 *= yf0;
        zf1 *= yf1;

        if( y_aligned )
        {
            v_store_aligned(y + i, zf0);
            v_store_aligned(y + i + VECSZ, zf1);
        }
        else
        {
            v_store(y + i, zf0);
            v_store(y + i + VECSZ, zf1);
        }
    }
    vx_cleanup();
#endif

    for( ; i < n; i++ )
    {
        float x0 = x[i].f;
        x0 = std::min(std::max(x0, minval), maxval);
        x0 *= (float)exp_prescale;
        Cv32suf buf;

        int xi = saturate_cast<int>(x0);
        x0 = (x0 - xi)*postscale;

        int t = (xi >> EXPTAB_SCALE) + 127;
        t = !(t & ~255) ? t : t < 0 ? 0 : 255;
        buf.i = t << 23;

        y[i] = buf.f * expTab_f[xi & EXPTAB_MASK] * ((((x0 + A1)*x0 + A2)*x0 + A3)*x0 + A4);
    }
}
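// The exp kernels use the classic range reduction exp(x) = 2^k * exp(r) with
// k close to x/ln(2): k is written straight into the IEEE-754 exponent field
// (the "t << 23" / "(int64)t << 52" step), while a table lookup plus a short
// polynomial evaluates exp(r). A minimal scalar sketch of the reduction,
// assuming std::nearbyint/std::ldexp and falling back to std::exp for the
// reduced term (an illustration only, not the kernel above):
#include <cmath>

float exp_decomposed(float v)
{
    float k = std::nearbyint(v * 1.44269504f);   // k ~= v / ln(2)
    float r = v - k * 0.69314718f;               // remainder, roughly |r| <= ln(2)/2
    return std::ldexp(std::exp(r), (int)k);      // 2^k * exp(r)
}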
/*
  The trick with STORE_UNALIGNED/STORE_ALIGNED_NOCACHE is the following:
  on IA there are instructions movntps and such to which
  v_store_interleave(...., STORE_ALIGNED_NOCACHE) is mapped.
  Those instructions write directly into memory w/o touching cache,
  which results in dramatic speed improvements, especially on
  large arrays (FullHD, 4K etc.).

  Those intrinsics require the destination address to be aligned
  by 16/32 bytes (with SSE2 and AVX2, respectively).
  So we potentially split the processing into 3 stages:
  1) the optional prefix part [0:i0), where we use simple unaligned stores.
  2) the optional main part [i0:len - VECSZ], where we use the "nocache" mode.
     But in some cases we have to use unaligned stores in this part.
  3) the optional suffix part (the tail) (len - VECSZ:len), where we switch
     back to the "unaligned" mode to process the remaining elements.
  In principle there can be very poorly aligned data where there is no main part.
  For that case we set i0=0 and use unaligned stores for the whole array.
*/
template<typename T, typename VecT> static void
vecmerge_( const T** src, T* dst, int len, int cn )
{
    const int VECSZ = VecT::nlanes;
    int i, i0 = 0;
    const T* src0 = src[0];
    const T* src1 = src[1];

    const int dstElemSize = cn * sizeof(T);
    int r = (int)((size_t)(void*)dst % (VECSZ*sizeof(T)));
    hal::StoreMode mode = hal::STORE_ALIGNED_NOCACHE;
    if( r != 0 )
    {
        mode = hal::STORE_UNALIGNED;
        if (r % dstElemSize == 0 && len > VECSZ*2)
            i0 = VECSZ - (r / dstElemSize);
    }

    if( cn == 2 )
    {
        for( i = 0; i < len; i += VECSZ )
        {
            if( i > len - VECSZ )
            {
                i = len - VECSZ;
                mode = hal::STORE_UNALIGNED;
            }
            VecT a = vx_load(src0 + i), b = vx_load(src1 + i);
            v_store_interleave(dst + i*cn, a, b, mode);
            if( i < i0 )
            {
                i = i0 - VECSZ;
                mode = hal::STORE_ALIGNED_NOCACHE;
            }
        }
    }
    else if( cn == 3 )
    {
        const T* src2 = src[2];
        for( i = 0; i < len; i += VECSZ )
        {
            if( i > len - VECSZ )
            {
                i = len - VECSZ;
                mode = hal::STORE_UNALIGNED;
            }
            VecT a = vx_load(src0 + i), b = vx_load(src1 + i), c = vx_load(src2 + i);
            v_store_interleave(dst + i*cn, a, b, c, mode);
            if( i < i0 )
            {
                i = i0 - VECSZ;
                mode = hal::STORE_ALIGNED_NOCACHE;
            }
        }
    }
    else
    {
        CV_Assert( cn == 4 );
        const T* src2 = src[2];
        const T* src3 = src[3];
        for( i = 0; i < len; i += VECSZ )
        {
            if( i > len - VECSZ )
            {
                i = len - VECSZ;
                mode = hal::STORE_UNALIGNED;
            }
            VecT a = vx_load(src0 + i), b = vx_load(src1 + i);
            VecT c = vx_load(src2 + i), d = vx_load(src3 + i);
            v_store_interleave(dst + i*cn, a, b, c, d, mode);
            if( i < i0 )
            {
                i = i0 - VECSZ;
                mode = hal::STORE_ALIGNED_NOCACHE;
            }
        }
    }
    vx_cleanup();
}
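// A hedged sketch of how vecmerge_ is typically dispatched from a channel-merge
// entry point; the wrapper name and the scalar fallback below are assumptions
// for illustration, not the library's exact code:
static void merge8u_sketch(const uchar** src, uchar* dst, int len, int cn)
{
#if CV_SIMD
    if( (cn == 2 || cn == 3 || cn == 4) && len >= v_uint8::nlanes )
    {
        vecmerge_<uchar, v_uint8>(src, dst, len, cn);
        return;
    }
#endif
    for( int i = 0; i < len; i++ )          // scalar fallback
        for( int c = 0; c < cn; c++ )
            dst[i*cn + c] = src[c][i];
}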