/*!
 * \brief Multiply the two given vectors of bytes
 */
ETL_STATIC_INLINE(avx_simd_byte) mul(avx_simd_byte lhs, avx_simd_byte rhs) {
    auto aodd    = _mm256_srli_epi16(lhs.value, 8);
    auto bodd    = _mm256_srli_epi16(rhs.value, 8);
    auto muleven = _mm256_mullo_epi16(lhs.value, rhs.value);
    auto mulodd  = _mm256_slli_epi16(_mm256_mullo_epi16(aodd, bodd), 8);
    return _mm256_blendv_epi8(mulodd, muleven, _mm256_set1_epi32(0x00FF00FF));
}
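// Scalar reference for the even/odd 16-bit trick above (a minimal sketch, not part of ETL):
// each result byte is just the low 8 bits of the per-byte product, which is exactly what the
// two 16-bit multiplies plus the final byte blend reproduce.
#include <stddef.h>
#include <stdint.h>

static void mul_bytes_ref(const uint8_t* lhs, const uint8_t* rhs, uint8_t* out, size_t n) {
    for (size_t i = 0; i < n; ++i)
        out[i] = (uint8_t)(lhs[i] * rhs[i]); /* product modulo 256 */
}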
static INLINE __m256i calc_mask_d16_avx2(const __m256i *data_src0,
                                         const __m256i *data_src1,
                                         const __m256i *round_const,
                                         const __m256i *mask_base_16,
                                         const __m256i *clip_diff, int round) {
  const __m256i diffa = _mm256_subs_epu16(*data_src0, *data_src1);
  const __m256i diffb = _mm256_subs_epu16(*data_src1, *data_src0);
  const __m256i diff = _mm256_max_epu16(diffa, diffb);
  const __m256i diff_round =
      _mm256_srli_epi16(_mm256_adds_epu16(diff, *round_const), round);
  const __m256i diff_factor = _mm256_srli_epi16(diff_round, DIFF_FACTOR_LOG2);
  const __m256i diff_mask = _mm256_adds_epi16(diff_factor, *mask_base_16);
  const __m256i diff_clamp = _mm256_min_epi16(diff_mask, *clip_diff);
  return diff_clamp;
}
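// Scalar reference for the per-lane arithmetic above (a sketch only; it ignores the unsigned
// saturation of the adds/subs intrinsics and assumes DIFF_FACTOR_LOG2 comes from the same
// libaom context as the AVX2 version).
static inline int calc_mask_d16_scalar(int src0, int src1, int round_const,
                                       int mask_base, int clip_diff, int round) {
    const int diff = src0 > src1 ? src0 - src1 : src1 - src0;        /* |src0 - src1| */
    const int diff_factor = ((diff + round_const) >> round) >> DIFF_FACTOR_LOG2;
    const int diff_mask = diff_factor + mask_base;
    return diff_mask < clip_diff ? diff_mask : clip_diff;            /* min() replaces the clamp */
}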
static INLINE __m256i calc_mask_avx2(const __m256i mask_base, const __m256i s0,
                                     const __m256i s1) {
  const __m256i diff = _mm256_abs_epi16(_mm256_sub_epi16(s0, s1));
  return _mm256_abs_epi16(
      _mm256_add_epi16(mask_base, _mm256_srli_epi16(diff, 4)));
  // clamp(diff, 0, 64) can be skipped since diff is always in the range (38, 54)
}
void maddrc16_imul_avx2(uint8_t* region1, const uint8_t* region2,
                        uint8_t constant, size_t length)
{
    uint8_t *end;
    register __m256i reg1, reg2, ri[4], sp[4], mi[4];
    const uint8_t *p = pt[constant];

    if (constant == 0)
        return;

    if (constant == 1) {
        xorr_avx2(region1, region2, length);
        return;
    }

    mi[0] = _mm256_set1_epi8(0x11);
    mi[1] = _mm256_set1_epi8(0x22);
    mi[2] = _mm256_set1_epi8(0x44);
    mi[3] = _mm256_set1_epi8(0x88);
    sp[0] = _mm256_set1_epi16(p[0]);
    sp[1] = _mm256_set1_epi16(p[1]);
    sp[2] = _mm256_set1_epi16(p[2]);
    sp[3] = _mm256_set1_epi16(p[3]);

    for (end = region1 + length; region1 < end; region1 += 32, region2 += 32) {
        reg2 = _mm256_load_si256((void *)region2);
        reg1 = _mm256_load_si256((void *)region1);
        ri[0] = _mm256_and_si256(reg2, mi[0]);
        ri[1] = _mm256_and_si256(reg2, mi[1]);
        ri[2] = _mm256_and_si256(reg2, mi[2]);
        ri[3] = _mm256_and_si256(reg2, mi[3]);
        ri[1] = _mm256_srli_epi16(ri[1], 1);
        ri[2] = _mm256_srli_epi16(ri[2], 2);
        ri[3] = _mm256_srli_epi16(ri[3], 3);
        ri[0] = _mm256_mullo_epi16(ri[0], sp[0]);
        ri[1] = _mm256_mullo_epi16(ri[1], sp[1]);
        ri[2] = _mm256_mullo_epi16(ri[2], sp[2]);
        ri[3] = _mm256_mullo_epi16(ri[3], sp[3]);
        ri[0] = _mm256_xor_si256(ri[0], ri[1]);
        ri[2] = _mm256_xor_si256(ri[2], ri[3]);
        ri[0] = _mm256_xor_si256(ri[0], ri[2]);
        ri[0] = _mm256_xor_si256(ri[0], reg1);
        _mm256_store_si256((void *)region1, ri[0]);
    }
}
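// Hypothetical scalar fallback for the region multiply-accumulate above (a sketch only):
// it assumes libmoepgf's packing of two 4-bit GF(2^4) elements per byte, and gf16_mul_byte()
// is an assumed helper that multiplies both nibbles of a byte by `constant` in GF(2^4).
#include <stddef.h>
#include <stdint.h>

static void maddrc16_scalar_ref(uint8_t* region1, const uint8_t* region2,
                                uint8_t constant, size_t length)
{
    for (size_t i = 0; i < length; ++i)
        region1[i] ^= gf16_mul_byte(region2[i], constant); /* region1 += constant * region2 over GF(16) */
}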
// 32bpp optimized for 8-bit ARGB/RGBA. rmask should be 0x00FF,0x00FF,... etc
static inline __m256i stretchblt_line_bilinear_pixel_blend_avx_argb8(const __m256i cur, const __m256i nxt, const __m256i mul, const __m256i rmask) {
    __m256i rc, gc;
    __m256i rn, gn;
    __m256i d, sum;

    rc = _mm256_and_si256(                   cur,     rmask);
    gc = _mm256_and_si256(_mm256_srli_epi16( cur, 8), rmask);

    rn = _mm256_and_si256(                   nxt,     rmask);
    gn = _mm256_and_si256(_mm256_srli_epi16( nxt, 8), rmask);

    d   = _mm256_sub_epi16(rn, rc);
    sum = _mm256_add_epi16(rc, _mm256_mulhi_epi16(_mm256_add_epi16(d, d), mul));
    d   = _mm256_sub_epi16(gn, gc);
    sum = _mm256_add_epi16(_mm256_slli_epi16(_mm256_add_epi16(gc, _mm256_mulhi_epi16(_mm256_add_epi16(d, d), mul)), 8), sum);

    return sum;
}
static __m256i avx2_popcount(const __m256i vec) {

    const __m256i lookup = _mm256_setr_epi8(
        /* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
        /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
        /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
        /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4,

        /* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
        /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
        /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
        /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4
    );

    const __m256i low_mask = _mm256_set1_epi8(0x0f);

    const __m256i lo      = _mm256_and_si256(vec, low_mask);
    const __m256i hi      = _mm256_and_si256(_mm256_srli_epi16(vec, 4), low_mask);
    const __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
    const __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);

    return _mm256_add_epi8(popcnt1, popcnt2);
}
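// A minimal usage sketch (not part of the original source): reduce the per-byte counts
// returned by avx2_popcount() to a single total with _mm256_sad_epu8 against zero, which
// sums each group of eight bytes into one of four 64-bit lanes.
#include <immintrin.h>
#include <stdint.h>

static uint64_t avx2_popcount_total(const __m256i vec) {
    const __m256i counts = avx2_popcount(vec);                             /* one count per byte */
    const __m256i sums = _mm256_sad_epu8(counts, _mm256_setzero_si256());  /* 4 x 64-bit partial sums */
    return (uint64_t)_mm256_extract_epi64(sums, 0) + (uint64_t)_mm256_extract_epi64(sums, 1) +
           (uint64_t)_mm256_extract_epi64(sums, 2) + (uint64_t)_mm256_extract_epi64(sums, 3);
}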
// 16bpp general R/G/B, usually 5/6/5 or 5/5/5
static inline __m256i stretchblt_line_bilinear_pixel_blend_avx_rgb16(const __m256i cur, const __m256i nxt, const __m256i mul, const __m256i rmask, const uint16_t rshift, const __m256i gmask, const uint16_t gshift, const __m256i bmask, const uint16_t bshift) {
    __m256i rc, gc, bc;
    __m256i rn, gn, bn;
    __m256i d, sum;

    rc = _mm256_and_si256(_mm256_srli_epi16(cur, rshift), rmask);
    gc = _mm256_and_si256(_mm256_srli_epi16(cur, gshift), gmask);
    bc = _mm256_and_si256(_mm256_srli_epi16(cur, bshift), bmask);

    rn = _mm256_and_si256(_mm256_srli_epi16(nxt, rshift), rmask);
    gn = _mm256_and_si256(_mm256_srli_epi16(nxt, gshift), gmask);
    bn = _mm256_and_si256(_mm256_srli_epi16(nxt, bshift), bmask);

    d   = _mm256_sub_epi16(rn, rc);
    sum = _mm256_slli_epi16(_mm256_add_epi16(rc, _mm256_mulhi_epi16(_mm256_add_epi16(d, d), mul)), rshift);
    d   = _mm256_sub_epi16(gn, gc);
    sum = _mm256_add_epi16(_mm256_slli_epi16(_mm256_add_epi16(gc, _mm256_mulhi_epi16(_mm256_add_epi16(d, d), mul)), gshift), sum);
    d   = _mm256_sub_epi16(bn, bc);
    sum = _mm256_add_epi16(_mm256_slli_epi16(_mm256_add_epi16(bc, _mm256_mulhi_epi16(_mm256_add_epi16(d, d), mul)), bshift), sum);

    return sum;
}
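// Per-channel arithmetic shared by the two blend helpers above, in scalar form (a sketch;
// it assumes `mul` carries the interpolation fraction in Q15, i.e. 0..0x7FFF, so that
// mulhi(2*d, mul) approximates d * fraction, and that channel values are small masked values).
#include <stdint.h>

static inline uint16_t bilinear_blend_channel_ref(uint16_t cur, uint16_t nxt, uint16_t mul) {
    const int32_t d = (int32_t)nxt - (int32_t)cur;              /* signed channel difference */
    return (uint16_t)(cur + ((d * 2 * (int32_t)mul) >> 16));    /* cur + d * fraction (Q15) */
}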
void extern
avx2_test (void)
{
  x = _mm256_srli_epi16 (x, 13);
}
SIMD_INLINE __m256i Average16(const __m256i & s0, const __m256i & s1)
{
    return _mm256_srli_epi16(_mm256_add_epi16(_mm256_add_epi16(
        _mm256_maddubs_epi16(s0, K8_01), _mm256_maddubs_epi16(s1, K8_01)), K16_0002), 2);
}
SIMD_INLINE __m256i Average16(const __m256i & s0, const __m256i & s1)
{
    return _mm256_srli_epi16(_mm256_add_epi16(_mm256_add_epi16(
        _mm256_hadd_epi16(_mm256_unpacklo_epi8(s0, K_ZERO), _mm256_unpackhi_epi8(s0, K_ZERO)),
        _mm256_hadd_epi16(_mm256_unpacklo_epi8(s1, K_ZERO), _mm256_unpackhi_epi8(s1, K_ZERO))), K16_0002), 2);
}
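// Both Average16 variants above compute the same rounded average of two adjacent bytes from
// each of two rows (they only differ in element ordering); the scalar equivalent is a sketch:
//     dst[i] = (a[2*i] + a[2*i+1] + b[2*i] + b[2*i+1] + 2) >> 2
#include <stddef.h>
#include <stdint.h>

static inline uint16_t Average16Ref(const uint8_t* a, const uint8_t* b, size_t i)
{
    return (uint16_t)((a[2 * i] + a[2 * i + 1] + b[2 * i] + b[2 * i + 1] + 2) >> 2);
}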
template <> SIMD_INLINE __m256i DivideBy256<true>(__m256i value)
{
    return _mm256_srli_epi16(_mm256_add_epi16(value, K16_0080), 8);
}
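// Scalar view of the rounding variant above (a sketch, assuming K16_0080 holds 0x0080 in every
// 16-bit lane): adding half of the divisor before the logical shift turns the truncating
// division by 256 into round-to-nearest.
#include <stdint.h>

static inline uint16_t divide_by_256_round(uint16_t v)
{
    return (uint16_t)((v + 0x80) >> 8);
}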
static int make_frame_planar_yuv_stacked
(
    lw_video_output_handler_t *vohp,
    int                        height,
    AVFrame                   *av_frame,
    PVideoFrame               &as_frame
)
{
    as_picture_t dst_picture = { { { NULL } } };
    as_picture_t src_picture = { { { NULL } } };
    as_assign_planar_yuv( as_frame, &dst_picture );
    lw_video_scaler_handler_t *vshp    = &vohp->scaler;
    as_video_output_handler_t *as_vohp = (as_video_output_handler_t *)vohp->private_handler;
    if( vshp->input_pixel_format == vshp->output_pixel_format )
        for( int i = 0; i < 3; i++ )
        {
            src_picture.data    [i] = av_frame->data    [i];
            src_picture.linesize[i] = av_frame->linesize[i];
        }
    else
    {
        if( convert_av_pixel_format( vshp->sws_ctx, height, av_frame, &as_vohp->scaled ) < 0 )
            return -1;
        src_picture = as_vohp->scaled;
    }
    for( int i = 0; i < 3; i++ )
    {
        const int src_height = height >> (i ? as_vohp->sub_height : 0);
        const int width      = vshp->input_width >> (i ? as_vohp->sub_width : 0);
        const int width16    = sse2_available > 0 ? (width & ~15) : 0;
        const int width32    = avx2_available > 0 ? (width & ~31) : 0;
        const int lsb_offset = src_height * dst_picture.linesize[i];
        for( int j = 0; j < src_height; j++ )
        {
            /* Use SIMD instructions here if they are available.
             * Note: this assumes the given addresses are divisible by 32 or 16.
             * The destination is always 32-byte aligned unless AviSynth legacy alignment is used.
             * The source is not always 32- or 16-byte aligned when the frame buffer comes directly from libavcodec. */
            static const uint8_t LW_ALIGN(32) sp16[32] =
                {
                    /* saturation protector:
                     * clears all upper 8 bits so that saturation cannot occur. */
                    0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00,
                    0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00,
                    0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00,
                    0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00
                };
            uint8_t       *dst = dst_picture.data[i] + j * dst_picture.linesize[i];    /* MSB: dst + k, LSB: dst + k + lsb_offset */
            const uint8_t *src = src_picture.data[i] + j * src_picture.linesize[i];    /* MSB: src + 2 * k + 1, LSB: src + 2 * k */
            const int _width16 = ((intptr_t)src & 15) == 0 ? width16 : 0;  /* Don't use SSE2 instructions if set to 0. */
            const int _width32 = ((intptr_t)src & 31) == 0 ? width32 : 0;  /* Don't use AVX(2) instructions if set to 0. */
#if VC_HAS_AVX2
            /* AVX, AVX2 */
            for( int k = 0; k < _width32; k += 32 )
            {
                __m256i ymm0 = _mm256_load_si256( (__m256i *)(src + 2 * k     ) );
                __m256i ymm1 = _mm256_load_si256( (__m256i *)(src + 2 * k + 32) );
                __m256i mask = _mm256_load_si256( (__m256i *)sp16 );
                __m256i ymm2 = _mm256_packus_epi16( _mm256_and_si256 ( ymm0, mask ), _mm256_and_si256 ( ymm1, mask ) );
                __m256i ymm3 = _mm256_packus_epi16( _mm256_srli_epi16( ymm0, 8 ), _mm256_srli_epi16( ymm1, 8 ) );
                _mm256_store_si256( (__m256i *)(dst + k + lsb_offset), _mm256_permute4x64_epi64( ymm2, _MM_SHUFFLE( 3, 1, 2, 0 ) ) );
                _mm256_store_si256( (__m256i *)(dst + k             ), _mm256_permute4x64_epi64( ymm3, _MM_SHUFFLE( 3, 1, 2, 0 ) ) );
            }
#endif
            /* SSE2 */
            for( int k = _width32; k < _width16; k += 16 )
            {
                __m128i xmm0 = _mm_load_si128( (__m128i *)(src + 2 * k     ) );
                __m128i xmm1 = _mm_load_si128( (__m128i *)(src + 2 * k + 16) );
                __m128i mask = _mm_load_si128( (__m128i *)sp16 );
                _mm_store_si128( (__m128i *)(dst + k + lsb_offset), _mm_packus_epi16( _mm_and_si128 ( xmm0, mask ), _mm_and_si128 ( xmm1, mask ) ) );
                _mm_store_si128( (__m128i *)(dst + k             ), _mm_packus_epi16( _mm_srli_epi16( xmm0, 8 ), _mm_srli_epi16( xmm1, 8 ) ) );
            }
            for( int k = _width16; k < width; k++ )
            {
                *(dst + k + lsb_offset) = *(src + 2 * k    );
                *(dst + k             ) = *(src + 2 * k + 1);
            }
        }
    }
    return 0;
}
void vec_i8_cnt_dosage2(const int8_t *p, int8_t *out, size_t n, int8_t val,
    int8_t missing, int8_t missing_substitute)
{
#ifdef COREARRAY_SIMD_SSE2

    // header 1, 16-byte aligned
    size_t h = (16 - ((size_t)out & 0x0F)) & 0x0F;
    for (; (n > 0) && (h > 0); n--, h--, p+=2)
    {
        *out ++ = ((p[0] == missing) || (p[1] == missing)) ?
            missing_substitute :
            (p[0]==val ? 1 : 0) + (p[1]==val ? 1 : 0);
    }

    // body, SSE2
    const __m128i val16  = _mm_set1_epi8(val);
    const __m128i miss16 = _mm_set1_epi8(missing);
    const __m128i sub16  = _mm_set1_epi8(missing_substitute);
    const __m128i mask   = _mm_set1_epi16(0x00FF);

#   ifdef COREARRAY_SIMD_AVX2

    // header 2, 32-byte aligned
    if ((n >= 16) && ((size_t)out & 0x10))
    {
        __m128i w1 = MM_LOADU_128((__m128i const*)p); p += 16;
        __m128i w2 = MM_LOADU_128((__m128i const*)p); p += 16;

        __m128i v1 = _mm_packus_epi16(_mm_and_si128(w1, mask), _mm_and_si128(w2, mask));
        __m128i v2 = _mm_packus_epi16(_mm_srli_epi16(w1, 8), _mm_srli_epi16(w2, 8));

        __m128i c = _mm_setzero_si128();
        c = _mm_sub_epi8(c, _mm_cmpeq_epi8(v1, val16));
        c = _mm_sub_epi8(c, _mm_cmpeq_epi8(v2, val16));

        w1 = _mm_cmpeq_epi8(v1, miss16);
        w2 = _mm_cmpeq_epi8(v2, miss16);
        __m128i w = _mm_or_si128(w1, w2);
        c = _mm_or_si128(_mm_and_si128(w, sub16), _mm_andnot_si128(w, c));

        _mm_store_si128((__m128i *)out, c);
        n -= 16; out += 16;
    }

    const __m256i val32  = _mm256_set1_epi8(val);
    const __m256i miss32 = _mm256_set1_epi8(missing);
    const __m256i sub32  = _mm256_set1_epi8(missing_substitute);
    const __m256i mask2  = _mm256_set1_epi16(0x00FF);

    for (; n >= 32; n-=32)
    {
        __m256i w1 = MM_LOADU_256((__m256i const*)p); p += 32;
        __m256i w2 = MM_LOADU_256((__m256i const*)p); p += 32;

        __m256i v1 = _mm256_packus_epi16(_mm256_and_si256(w1, mask2), _mm256_and_si256(w2, mask2));
        __m256i v2 = _mm256_packus_epi16(_mm256_srli_epi16(w1, 8), _mm256_srli_epi16(w2, 8));

        __m256i c = _mm256_setzero_si256();
        c = _mm256_sub_epi8(c, _mm256_cmpeq_epi8(v1, val32));
        c = _mm256_sub_epi8(c, _mm256_cmpeq_epi8(v2, val32));

        w1 = _mm256_cmpeq_epi8(v1, miss32);
        w2 = _mm256_cmpeq_epi8(v2, miss32);
        __m256i w = _mm256_or_si256(w1, w2);
        c = _mm256_or_si256(_mm256_and_si256(w, sub32), _mm256_andnot_si256(w, c));

        c = _mm256_permute4x64_epi64(c, 0xD8);
        _mm256_store_si256((__m256i *)out, c);
        out += 32;
    }

#   endif

    // SSE2 only
    for (; n >= 16; n-=16)
    {
        __m128i w1 = MM_LOADU_128((__m128i const*)p); p += 16;
        __m128i w2 = MM_LOADU_128((__m128i const*)p); p += 16;

        __m128i v1 = _mm_packus_epi16(_mm_and_si128(w1, mask), _mm_and_si128(w2, mask));
        __m128i v2 = _mm_packus_epi16(_mm_srli_epi16(w1, 8), _mm_srli_epi16(w2, 8));

        __m128i c = _mm_setzero_si128();
        c = _mm_sub_epi8(c, _mm_cmpeq_epi8(v1, val16));
        c = _mm_sub_epi8(c, _mm_cmpeq_epi8(v2, val16));

        w1 = _mm_cmpeq_epi8(v1, miss16);
        w2 = _mm_cmpeq_epi8(v2, miss16);
        __m128i w = _mm_or_si128(w1, w2);
        c = _mm_or_si128(_mm_and_si128(w, sub16), _mm_andnot_si128(w, c));

        _mm_store_si128((__m128i *)out, c);
        out += 16;
    }

#endif

    // tail
    for (; n > 0; n--, p+=2)
    {
        *out ++ = ((p[0] == missing) || (p[1] == missing)) ?
            missing_substitute :
            (p[0]==val ? 1 : 0) + (p[1]==val ? 1 : 0);
    }
}
static void mb_lpf_horizontal_edge_w_avx2_16(unsigned char *s, int p, const unsigned char *_blimit, const unsigned char *_limit, const unsigned char *_thresh) { __m128i mask, hev, flat, flat2; const __m128i zero = _mm_set1_epi16(0); const __m128i one = _mm_set1_epi8(1); __m128i p7, p6, p5; __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; __m128i q5, q6, q7; __m256i p256_7, q256_7, p256_6, q256_6, p256_5, q256_5, p256_4, q256_4, p256_3, q256_3, p256_2, q256_2, p256_1, q256_1, p256_0, q256_0; const __m128i thresh = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_thresh[0])); const __m128i limit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_limit[0])); const __m128i blimit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_blimit[0])); p256_4 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 5 * p))); p256_3 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 4 * p))); p256_2 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 3 * p))); p256_1 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 2 * p))); p256_0 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 1 * p))); q256_0 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 0 * p))); q256_1 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 1 * p))); q256_2 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 2 * p))); q256_3 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 3 * p))); q256_4 = _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 4 * p))); p4 = _mm256_castsi256_si128(p256_4); p3 = _mm256_castsi256_si128(p256_3); p2 = _mm256_castsi256_si128(p256_2); p1 = _mm256_castsi256_si128(p256_1); p0 = _mm256_castsi256_si128(p256_0); q0 = _mm256_castsi256_si128(q256_0); q1 = _mm256_castsi256_si128(q256_1); q2 = _mm256_castsi256_si128(q256_2); q3 = _mm256_castsi256_si128(q256_3); q4 = _mm256_castsi256_si128(q256_4); { const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1)); const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0), _mm_subs_epu8(q0, q1)); const __m128i fe = _mm_set1_epi8(0xfe); const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0); __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0), _mm_subs_epu8(q0, p0)); __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1)); __m128i work; flat = _mm_max_epu8(abs_p1p0, abs_q1q0); hev = _mm_subs_epu8(flat, thresh); hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; mask = _mm_max_epu8(flat, mask); // mask |= (abs(p1 - p0) > limit) * -1; // mask |= (abs(q1 - q0) > limit) * -1; work = _mm_max_epu8( _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)), _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3))); mask = _mm_max_epu8(work, mask); work = _mm_max_epu8( _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)), _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3))); mask = _mm_max_epu8(work, mask); mask = _mm_subs_epu8(mask, limit); mask = _mm_cmpeq_epi8(mask, zero); } // lp filter { const __m128i t4 = _mm_set1_epi8(4); const __m128i t3 = _mm_set1_epi8(3); const __m128i t80 = _mm_set1_epi8(0x80); const __m128i te0 = _mm_set1_epi8(0xe0); const __m128i t1f = _mm_set1_epi8(0x1f); const __m128i t1 = _mm_set1_epi8(0x1); 
const __m128i t7f = _mm_set1_epi8(0x7f); __m128i ps1 = _mm_xor_si128(p1, t80); __m128i ps0 = _mm_xor_si128(p0, t80); __m128i qs0 = _mm_xor_si128(q0, t80); __m128i qs1 = _mm_xor_si128(q1, t80); __m128i filt; __m128i work_a; __m128i filter1, filter2; __m128i flat2_p6, flat2_p5, flat2_p4, flat2_p3, flat2_p2, flat2_p1, flat2_p0, flat2_q0, flat2_q1, flat2_q2, flat2_q3, flat2_q4, flat2_q5, flat2_q6, flat_p2, flat_p1, flat_p0, flat_q0, flat_q1, flat_q2; filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev); work_a = _mm_subs_epi8(qs0, ps0); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); /* (vpx_filter + 3 * (qs0 - ps0)) & mask */ filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); filter2 = _mm_adds_epi8(filt, t3); /* Filter1 >> 3 */ work_a = _mm_cmpgt_epi8(zero, filter1); filter1 = _mm_srli_epi16(filter1, 3); work_a = _mm_and_si128(work_a, te0); filter1 = _mm_and_si128(filter1, t1f); filter1 = _mm_or_si128(filter1, work_a); qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80); /* Filter2 >> 3 */ work_a = _mm_cmpgt_epi8(zero, filter2); filter2 = _mm_srli_epi16(filter2, 3); work_a = _mm_and_si128(work_a, te0); filter2 = _mm_and_si128(filter2, t1f); filter2 = _mm_or_si128(filter2, work_a); ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80); /* filt >> 1 */ filt = _mm_adds_epi8(filter1, t1); work_a = _mm_cmpgt_epi8(zero, filt); filt = _mm_srli_epi16(filt, 1); work_a = _mm_and_si128(work_a, t80); filt = _mm_and_si128(filt, t7f); filt = _mm_or_si128(filt, work_a); filt = _mm_andnot_si128(hev, filt); ps1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80); qs1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80); // loopfilter done { __m128i work; work = _mm_max_epu8( _mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)), _mm_or_si128(_mm_subs_epu8(q2, q0), _mm_subs_epu8(q0, q2))); flat = _mm_max_epu8(work, flat); work = _mm_max_epu8( _mm_or_si128(_mm_subs_epu8(p3, p0), _mm_subs_epu8(p0, p3)), _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3))); flat = _mm_max_epu8(work, flat); work = _mm_max_epu8( _mm_or_si128(_mm_subs_epu8(p4, p0), _mm_subs_epu8(p0, p4)), _mm_or_si128(_mm_subs_epu8(q4, q0), _mm_subs_epu8(q0, q4))); flat = _mm_subs_epu8(flat, one); flat = _mm_cmpeq_epi8(flat, zero); flat = _mm_and_si128(flat, mask); p256_5 = _mm256_castpd_si256( _mm256_broadcast_pd((__m128d const *)(s - 6 * p))); q256_5 = _mm256_castpd_si256( _mm256_broadcast_pd((__m128d const *)(s + 5 * p))); p5 = _mm256_castsi256_si128(p256_5); q5 = _mm256_castsi256_si128(q256_5); flat2 = _mm_max_epu8( _mm_or_si128(_mm_subs_epu8(p5, p0), _mm_subs_epu8(p0, p5)), _mm_or_si128(_mm_subs_epu8(q5, q0), _mm_subs_epu8(q0, q5))); flat2 = _mm_max_epu8(work, flat2); p256_6 = _mm256_castpd_si256( _mm256_broadcast_pd((__m128d const *)(s - 7 * p))); q256_6 = _mm256_castpd_si256( _mm256_broadcast_pd((__m128d const *)(s + 6 * p))); p6 = _mm256_castsi256_si128(p256_6); q6 = _mm256_castsi256_si128(q256_6); work = _mm_max_epu8( _mm_or_si128(_mm_subs_epu8(p6, p0), _mm_subs_epu8(p0, p6)), _mm_or_si128(_mm_subs_epu8(q6, q0), _mm_subs_epu8(q0, q6))); flat2 = _mm_max_epu8(work, flat2); p256_7 = _mm256_castpd_si256( _mm256_broadcast_pd((__m128d const *)(s - 8 * p))); q256_7 = _mm256_castpd_si256( _mm256_broadcast_pd((__m128d const *)(s + 7 * p))); p7 = _mm256_castsi256_si128(p256_7); q7 = _mm256_castsi256_si128(q256_7); work = _mm_max_epu8( _mm_or_si128(_mm_subs_epu8(p7, p0), _mm_subs_epu8(p0, p7)), _mm_or_si128(_mm_subs_epu8(q7, q0), _mm_subs_epu8(q0, q7))); flat2 = 
_mm_max_epu8(work, flat2); flat2 = _mm_subs_epu8(flat2, one); flat2 = _mm_cmpeq_epi8(flat2, zero); flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // flat and wide flat calculations { const __m256i eight = _mm256_set1_epi16(8); const __m256i four = _mm256_set1_epi16(4); __m256i pixelFilter_p, pixelFilter_q, pixetFilter_p2p1p0, pixetFilter_q2q1q0, sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q; const __m256i filter = _mm256_load_si256((__m256i const *)filt_loopfilter_avx2); p256_7 = _mm256_shuffle_epi8(p256_7, filter); p256_6 = _mm256_shuffle_epi8(p256_6, filter); p256_5 = _mm256_shuffle_epi8(p256_5, filter); p256_4 = _mm256_shuffle_epi8(p256_4, filter); p256_3 = _mm256_shuffle_epi8(p256_3, filter); p256_2 = _mm256_shuffle_epi8(p256_2, filter); p256_1 = _mm256_shuffle_epi8(p256_1, filter); p256_0 = _mm256_shuffle_epi8(p256_0, filter); q256_0 = _mm256_shuffle_epi8(q256_0, filter); q256_1 = _mm256_shuffle_epi8(q256_1, filter); q256_2 = _mm256_shuffle_epi8(q256_2, filter); q256_3 = _mm256_shuffle_epi8(q256_3, filter); q256_4 = _mm256_shuffle_epi8(q256_4, filter); q256_5 = _mm256_shuffle_epi8(q256_5, filter); q256_6 = _mm256_shuffle_epi8(q256_6, filter); q256_7 = _mm256_shuffle_epi8(q256_7, filter); pixelFilter_p = _mm256_add_epi16(_mm256_add_epi16(p256_6, p256_5), _mm256_add_epi16(p256_4, p256_3)); pixelFilter_q = _mm256_add_epi16(_mm256_add_epi16(q256_6, q256_5), _mm256_add_epi16(q256_4, q256_3)); pixetFilter_p2p1p0 = _mm256_add_epi16(p256_0, _mm256_add_epi16(p256_2, p256_1)); pixelFilter_p = _mm256_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); pixetFilter_q2q1q0 = _mm256_add_epi16(q256_0, _mm256_add_epi16(q256_2, q256_1)); pixelFilter_q = _mm256_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); pixelFilter_p = _mm256_add_epi16( eight, _mm256_add_epi16(pixelFilter_p, pixelFilter_q)); pixetFilter_p2p1p0 = _mm256_add_epi16( four, _mm256_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0)); res_p = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(p256_7, p256_0)), 4); flat2_p0 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(q256_7, q256_0)), 4); flat2_q0 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); res_p = _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0, _mm256_add_epi16(p256_3, p256_0)), 3); flat_p0 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0, _mm256_add_epi16(q256_3, q256_0)), 3); flat_q0 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); sum_p7 = _mm256_add_epi16(p256_7, p256_7); sum_q7 = _mm256_add_epi16(q256_7, q256_7); sum_p3 = _mm256_add_epi16(p256_3, p256_3); sum_q3 = _mm256_add_epi16(q256_3, q256_3); pixelFilter_q = _mm256_sub_epi16(pixelFilter_p, p256_6); pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_6); res_p = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_1)), 4); flat2_p1 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_1)), 4); flat2_q1 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_p2p1p0, p256_2); pixetFilter_p2p1p0 
= _mm256_sub_epi16(pixetFilter_p2p1p0, q256_2); res_p = _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0, _mm256_add_epi16(sum_p3, p256_1)), 3); flat_p1 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_q2q1q0, _mm256_add_epi16(sum_q3, q256_1)), 3); flat_q1 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); sum_p7 = _mm256_add_epi16(sum_p7, p256_7); sum_q7 = _mm256_add_epi16(sum_q7, q256_7); sum_p3 = _mm256_add_epi16(sum_p3, p256_3); sum_q3 = _mm256_add_epi16(sum_q3, q256_3); pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_5); pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_5); res_p = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_2)), 4); flat2_p2 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_2)), 4); flat2_q2 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_1); pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_q2q1q0, p256_1); res_p = _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0, _mm256_add_epi16(sum_p3, p256_2)), 3); flat_p2 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_q2q1q0, _mm256_add_epi16(sum_q3, q256_2)), 3); flat_q2 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); sum_p7 = _mm256_add_epi16(sum_p7, p256_7); sum_q7 = _mm256_add_epi16(sum_q7, q256_7); pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_4); pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_4); res_p = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_3)), 4); flat2_p3 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_3)), 4); flat2_q3 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); sum_p7 = _mm256_add_epi16(sum_p7, p256_7); sum_q7 = _mm256_add_epi16(sum_q7, q256_7); pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_3); pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_3); res_p = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_4)), 4); flat2_p4 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_4)), 4); flat2_q4 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); sum_p7 = _mm256_add_epi16(sum_p7, p256_7); sum_q7 = _mm256_add_epi16(sum_q7, q256_7); pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_2); pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_2); res_p = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_5)), 4); flat2_p5 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_5)), 4); flat2_q5 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); sum_p7 = _mm256_add_epi16(sum_p7, p256_7); sum_q7 = 
_mm256_add_epi16(sum_q7, q256_7); pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_1); pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_1); res_p = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_6)), 4); flat2_p6 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); res_q = _mm256_srli_epi16( _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_6)), 4); flat2_q6 = _mm256_castsi256_si128( _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); } // wide flat // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ p2 = _mm_andnot_si128(flat, p2); flat_p2 = _mm_and_si128(flat, flat_p2); p2 = _mm_or_si128(flat_p2, p2); p1 = _mm_andnot_si128(flat, ps1); flat_p1 = _mm_and_si128(flat, flat_p1); p1 = _mm_or_si128(flat_p1, p1); p0 = _mm_andnot_si128(flat, ps0); flat_p0 = _mm_and_si128(flat, flat_p0); p0 = _mm_or_si128(flat_p0, p0); q0 = _mm_andnot_si128(flat, qs0); flat_q0 = _mm_and_si128(flat, flat_q0); q0 = _mm_or_si128(flat_q0, q0); q1 = _mm_andnot_si128(flat, qs1); flat_q1 = _mm_and_si128(flat, flat_q1); q1 = _mm_or_si128(flat_q1, q1); q2 = _mm_andnot_si128(flat, q2); flat_q2 = _mm_and_si128(flat, flat_q2); q2 = _mm_or_si128(flat_q2, q2); p6 = _mm_andnot_si128(flat2, p6); flat2_p6 = _mm_and_si128(flat2, flat2_p6); p6 = _mm_or_si128(flat2_p6, p6); _mm_storeu_si128((__m128i *)(s - 7 * p), p6); p5 = _mm_andnot_si128(flat2, p5); flat2_p5 = _mm_and_si128(flat2, flat2_p5); p5 = _mm_or_si128(flat2_p5, p5); _mm_storeu_si128((__m128i *)(s - 6 * p), p5); p4 = _mm_andnot_si128(flat2, p4); flat2_p4 = _mm_and_si128(flat2, flat2_p4); p4 = _mm_or_si128(flat2_p4, p4); _mm_storeu_si128((__m128i *)(s - 5 * p), p4); p3 = _mm_andnot_si128(flat2, p3); flat2_p3 = _mm_and_si128(flat2, flat2_p3); p3 = _mm_or_si128(flat2_p3, p3); _mm_storeu_si128((__m128i *)(s - 4 * p), p3); p2 = _mm_andnot_si128(flat2, p2); flat2_p2 = _mm_and_si128(flat2, flat2_p2); p2 = _mm_or_si128(flat2_p2, p2); _mm_storeu_si128((__m128i *)(s - 3 * p), p2); p1 = _mm_andnot_si128(flat2, p1); flat2_p1 = _mm_and_si128(flat2, flat2_p1); p1 = _mm_or_si128(flat2_p1, p1); _mm_storeu_si128((__m128i *)(s - 2 * p), p1); p0 = _mm_andnot_si128(flat2, p0); flat2_p0 = _mm_and_si128(flat2, flat2_p0); p0 = _mm_or_si128(flat2_p0, p0); _mm_storeu_si128((__m128i *)(s - 1 * p), p0); q0 = _mm_andnot_si128(flat2, q0); flat2_q0 = _mm_and_si128(flat2, flat2_q0); q0 = _mm_or_si128(flat2_q0, q0); _mm_storeu_si128((__m128i *)(s - 0 * p), q0); q1 = _mm_andnot_si128(flat2, q1); flat2_q1 = _mm_and_si128(flat2, flat2_q1); q1 = _mm_or_si128(flat2_q1, q1); _mm_storeu_si128((__m128i *)(s + 1 * p), q1); q2 = _mm_andnot_si128(flat2, q2); flat2_q2 = _mm_and_si128(flat2, flat2_q2); q2 = _mm_or_si128(flat2_q2, q2); _mm_storeu_si128((__m128i *)(s + 2 * p), q2); q3 = _mm_andnot_si128(flat2, q3); flat2_q3 = _mm_and_si128(flat2, flat2_q3); q3 = _mm_or_si128(flat2_q3, q3); _mm_storeu_si128((__m128i *)(s + 3 * p), q3); q4 = _mm_andnot_si128(flat2, q4); flat2_q4 = _mm_and_si128(flat2, flat2_q4); q4 = _mm_or_si128(flat2_q4, q4); _mm_storeu_si128((__m128i *)(s + 4 * p), q4); q5 = _mm_andnot_si128(flat2, q5); flat2_q5 = _mm_and_si128(flat2, flat2_q5); q5 = _mm_or_si128(flat2_q5, q5); _mm_storeu_si128((__m128i *)(s + 5 * p), q5); q6 = _mm_andnot_si128(flat2, q6); flat2_q6 = _mm_and_si128(flat2, flat2_q6); q6 = _mm_or_si128(flat2_q6, q6); _mm_storeu_si128((__m128i *)(s + 6 * p), q6); } }
SIMD_INLINE __m256i DivideBy64(__m256i value)
{
    return _mm256_srli_epi16(_mm256_add_epi16(value, K16_0020), 6);
}
__m256i test_mm256_srli_epi16(__m256i a) {
  // CHECK-LABEL: test_mm256_srli_epi16
  // CHECK: call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %{{.*}}, i32 %{{.*}})
  return _mm256_srli_epi16(a, 3);
}
__m256i test_mm256_srli_epi16(__m256i a) {
  // CHECK: @llvm.x86.avx2.psrli.w
  return _mm256_srli_epi16(a, 3);
}
int normHamming(const uchar* a, const uchar* b, int n)
{
    CV_AVX_GUARD;

    int i = 0;
    int result = 0;
#if CV_AVX2
    {
        __m256i _r0 = _mm256_setzero_si256();
        __m256i _0 = _mm256_setzero_si256();
        __m256i _popcnt_table = _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
                                                 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4);
        __m256i _popcnt_mask = _mm256_set1_epi8(0x0F);

        for(; i <= n - 32; i+= 32)
        {
            __m256i _a0 = _mm256_loadu_si256((const __m256i*)(a + i));
            __m256i _b0 = _mm256_loadu_si256((const __m256i*)(b + i));

            __m256i _xor = _mm256_xor_si256(_a0, _b0);

            __m256i _popc0 = _mm256_shuffle_epi8(_popcnt_table, _mm256_and_si256(_xor, _popcnt_mask));
            __m256i _popc1 = _mm256_shuffle_epi8(_popcnt_table,
                                                 _mm256_and_si256(_mm256_srli_epi16(_xor, 4), _popcnt_mask));

            _r0 = _mm256_add_epi32(_r0, _mm256_sad_epu8(_0, _mm256_add_epi8(_popc0, _popc1)));
        }
        _r0 = _mm256_add_epi32(_r0, _mm256_shuffle_epi32(_r0, 2));
        result = _mm256_extract_epi32_(_mm256_add_epi32(_r0, _mm256_permute2x128_si256(_r0, _r0, 1)), 0);
    }
#endif // CV_AVX2

#if CV_POPCNT
    {
#  if defined CV_POPCNT_U64
        for(; i <= n - 8; i += 8)
        {
            result += (int)CV_POPCNT_U64(*(uint64*)(a + i) ^ *(uint64*)(b + i));
        }
#  endif
        for(; i <= n - 4; i += 4)
        {
            result += CV_POPCNT_U32(*(uint*)(a + i) ^ *(uint*)(b + i));
        }
    }
#endif // CV_POPCNT

#if CV_SIMD128
    {
        v_uint32x4 t = v_setzero_u32();
        for(; i <= n - v_uint8x16::nlanes; i += v_uint8x16::nlanes)
        {
            t += v_popcount(v_load(a + i) ^ v_load(b + i));
        }
        result += v_reduce_sum(t);
    }
#endif // CV_SIMD128

#if CV_ENABLE_UNROLLED
    for(; i <= n - 4; i += 4)
    {
        result += popCountTable[a[i] ^ b[i]] + popCountTable[a[i+1] ^ b[i+1]] +
                  popCountTable[a[i+2] ^ b[i+2]] + popCountTable[a[i+3] ^ b[i+3]];
    }
#endif
    for(; i < n; i++)
    {
        result += popCountTable[a[i] ^ b[i]];
    }
    return result;
}
template <> SIMD_INLINE __m256i DivideBy256<false>(__m256i value)
{
    return _mm256_srli_epi16(value, 8);
}
template<bool align> SIMD_INLINE __m256i InterpolateY(const __m256i * pbx0, const __m256i * pbx1, __m256i alpha[2])
{
    __m256i sum = _mm256_add_epi16(
        _mm256_mullo_epi16(Load<align>(pbx0), alpha[0]),
        _mm256_mullo_epi16(Load<align>(pbx1), alpha[1]));
    return _mm256_srli_epi16(_mm256_add_epi16(sum, K16_FRACTION_ROUND_TERM), Base::BILINEAR_SHIFT);
}
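// Scalar form of the vertical interpolation above (a sketch; it assumes alpha[0] + alpha[1]
// equals 1 << BILINEAR_SHIFT and that K16_FRACTION_ROUND_TERM holds 1 << (BILINEAR_SHIFT - 1)
// in every lane, as the names suggest).
#include <stdint.h>

static inline uint16_t InterpolateYRef(uint16_t x0, uint16_t x1, uint32_t alpha0,
                                       uint32_t alpha1, int shift)
{
    return (uint16_t)((x0 * alpha0 + x1 * alpha1 + (1u << (shift - 1))) >> shift);
}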