static unsigned int sad_w64_avg_avx2(const uint8_t *src_ptr, int src_stride,
                                     const uint8_t *ref_ptr, int ref_stride,
                                     const int h, const uint8_t *second_pred,
                                     const int second_pred_stride) {
  int i, res;
  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg;
  __m256i sum_sad = _mm256_setzero_si256();
  __m256i sum_sad_h;
  __m128i sum_sad128;
  for (i = 0; i < h; i++) {
    // Load 64 reference bytes and average them with the second predictor.
    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr);
    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32));
    ref1_reg = _mm256_avg_epu8(
        ref1_reg, _mm256_loadu_si256((__m256i const *)second_pred));
    ref2_reg = _mm256_avg_epu8(
        ref2_reg, _mm256_loadu_si256((__m256i const *)(second_pred + 32)));
    // Accumulate the SAD of the averaged row against the source row.
    sad1_reg = _mm256_sad_epu8(
        ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr));
    sad2_reg = _mm256_sad_epu8(
        ref2_reg, _mm256_loadu_si256((__m256i const *)(src_ptr + 32)));
    sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg));
    ref_ptr += ref_stride;
    src_ptr += src_stride;
    second_pred += second_pred_stride;
  }
  // Horizontal reduction: fold the per-lane 64-bit partial sums down to a
  // single 32-bit total.
  sum_sad_h = _mm256_srli_si256(sum_sad, 8);
  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h);
  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1);
  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128);
  res = _mm_cvtsi128_si32(sum_sad128);
  return res;
}
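A scalar cross-check helps pin down the rounding behavior: _mm256_avg_epu8 computes (x + y + 1) >> 1 per byte, so the averaged-SAD result can be verified against a plain loop. A minimal reference sketch (the name sad_w64_avg_ref is ours, not the library's, and it assumes the fixed 64-byte width of the routine above):

#include <stdint.h>
#include <stdlib.h>

/* Scalar reference for the 64-wide averaged SAD above (a sketch for
   cross-checking only; the function name is hypothetical). */
static unsigned int sad_w64_avg_ref(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride, int h,
                                    const uint8_t *second_pred,
                                    int second_pred_stride) {
  unsigned int sad = 0;
  for (int i = 0; i < h; i++) {
    for (int j = 0; j < 64; j++) {
      /* _mm256_avg_epu8 rounds up: (x + y + 1) >> 1. */
      const int avg = (ref[j] + second_pred[j] + 1) >> 1;
      sad += (unsigned int)abs(avg - src[j]);
    }
    src += src_stride;
    ref += ref_stride;
    second_pred += second_pred_stride;
  }
  return sad;
}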
template <bool srcAlign, bool stepAlign>
SIMD_INLINE __m256i AbsSecondDerivative(const uint8_t * src, ptrdiff_t step)
{
    const __m256i s0 = Load<srcAlign && stepAlign>((__m256i*)(src - step));
    const __m256i s1 = Load<srcAlign>((__m256i*)src);
    const __m256i s2 = Load<srcAlign && stepAlign>((__m256i*)(src + step));
    return AbsDifferenceU8(_mm256_avg_epu8(s0, s2), s1);
}
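Load<align> and AbsDifferenceU8 are Simd-library helpers; per pixel, the routine computes |avg(src[-step], src[step]) - src[0]|, with the average rounding up. A scalar model of that semantics (our sketch; the function name is hypothetical):

#include <stddef.h>
#include <stdint.h>

/* Per-pixel scalar model of AbsSecondDerivative (a sketch, not Simd code):
   rounding average of the two outer samples, then absolute difference
   against the center sample. */
static inline uint8_t abs_second_derivative_scalar(const uint8_t *src,
                                                   ptrdiff_t step) {
  const int avg = (src[-step] + src[step] + 1) >> 1;
  const int diff = avg - src[0];
  return (uint8_t)(diff < 0 ? -diff : diff);
}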
static void
avx2_test (void)
{
  union256i_b u, s1, s2;
  unsigned char e[32];
  int i;

  s1.x = _mm256_set_epi8 (1, 2, 3, 4, 10, 20, 30, 90, -80, -40, -100, -15,
                          98, 25, 98, 7, 88, 44, 33, 22, 11, 98, 76, -100,
                          -34, -78, -39, 6, 3, 4, 5, 119);
  s2.x = _mm256_set_epi8 (88, 44, 33, 22, 11, 98, 76, -100, -34, -78, -39, 6,
                          3, 4, 5, 119, 1, 2, 3, 4, 10, 20, 30, 90, -80, -40,
                          -100, -15, 98, 25, 98, 7);

  u.x = _mm256_avg_epu8 (s1.x, s2.x);

  /* Expected result: the rounding-up average (a + b + 1) >> 1, computed
     in int so the sum cannot overflow.  */
  for (i = 0; i < 32; i++)
    e[i] = ((unsigned char) s1.a[i] + (unsigned char) s2.a[i] + 1) >> 1;

  if (check_union256i_b (u, e))
    abort ();
}
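The test relies on the GCC testsuite harness for union256i_b and check_union256i_b. A minimal stand-in sufficient to compile and run it outside the testsuite (a sketch, not the real m256-check.h):

#include <immintrin.h>
#include <stdlib.h>
#include <string.h>

/* 256-bit value viewed as 32 bytes, mirroring the harness union. */
typedef union {
  __m256i x;
  char a[32];
} union256i_b;

/* Returns nonzero on mismatch, matching the convention that the test
   aborts when the check fails. */
static int check_union256i_b (union256i_b u, const unsigned char *e)
{
  return memcmp (u.a, e, 32) != 0;
}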
__m256i test_mm256_avg_epu8(__m256i a, __m256i b) {
  // CHECK: @llvm.x86.avx2.pavg.b
  return _mm256_avg_epu8(a, b);
}
__m256i test_mm256_avg_epu8(__m256i a, __m256i b) {
  // CHECK-LABEL: test_mm256_avg_epu8
  // CHECK: call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
  return _mm256_avg_epu8(a, b);
}
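Both FileCheck tests only pin the intrinsic to the llvm.x86.avx2.pavg.b builtin. To observe the semantics directly, a tiny standalone program (our sketch; build with -mavx2 and run on an AVX2-capable machine):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint8_t a[32], b[32], out[32];
  for (int i = 0; i < 32; i++) {
    a[i] = (uint8_t)i;
    b[i] = (uint8_t)(255 - i);  /* a[i] + b[i] == 255 in every lane */
  }
  __m256i va = _mm256_loadu_si256((const __m256i *)a);
  __m256i vb = _mm256_loadu_si256((const __m256i *)b);
  _mm256_storeu_si256((__m256i *)out, _mm256_avg_epu8(va, vb));
  for (int i = 0; i < 32; i++)
    printf("%d ", out[i]);  /* each lane is (255 + 1) >> 1 = 128 */
  putchar('\n');
  return 0;
}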
template <> SIMD_INLINE __m256i OperationBinary8u<SimdOperationBinary8uAverage>(const __m256i & a, const __m256i & b)
{
    return _mm256_avg_epu8(a, b);
}
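Everything above reduces to the rounding-up average (a + b + 1) >> 1. On targets without a packed-average instruction it can be emulated without widening via the identity (a | b) - ((a ^ b) >> 1); an exhaustive self-check of that identity over all byte pairs (our sketch):

#include <assert.h>
#include <stdint.h>

/* Verifies (a + b + 1) >> 1 == (a | b) - ((a ^ b) >> 1) for all unsigned
   8-bit a, b; the right-hand side needs no widening to 16 bits. */
int main(void) {
  for (int a = 0; a < 256; a++)
    for (int b = 0; b < 256; b++) {
      uint8_t widened = (uint8_t)((a + b + 1) >> 1);
      uint8_t swar = (uint8_t)((a | b) - ((a ^ b) >> 1));
      assert(widened == swar);
    }
  return 0;
}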
unsigned int vp9_sub_pixel_variance32xh_avx2(const uint8_t *src,
                                             int src_stride, int x_offset,
                                             int y_offset, const uint8_t *dst,
                                             int dst_stride, int height,
                                             unsigned int *sse) {
  __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
  __m256i zero_reg;
  int i, sum;
  sum_reg = _mm256_set1_epi16(0);
  sse_reg = _mm256_set1_epi16(0);
  zero_reg = _mm256_set1_epi16(0);

  // x_offset = 0 and y_offset = 0
  if (x_offset == 0) {
    if (y_offset == 0) {
      for (i = 0; i < height; i++) {
        LOAD_SRC_DST
        // expand each byte to 2 bytes
        MERGE_WITH_SRC(src_reg, zero_reg)
        CALC_SUM_SSE_INSIDE_LOOP
        src += src_stride;
        dst += dst_stride;
      }
      // x_offset = 0 and y_offset = 8
    } else if (y_offset == 8) {
      __m256i src_next_reg;
      for (i = 0; i < height; i++) {
        LOAD_SRC_DST
        AVG_NEXT_SRC(src_reg, src_stride)
        // expand each byte to 2 bytes
        MERGE_WITH_SRC(src_reg, zero_reg)
        CALC_SUM_SSE_INSIDE_LOOP
        src += src_stride;
        dst += dst_stride;
      }
      // x_offset = 0 and y_offset = bilin interpolation
    } else {
      __m256i filter, pw8, src_next_reg;
      y_offset <<= 5;
      filter = _mm256_load_si256(
          (__m256i const *)(bilinear_filters_avx2 + y_offset));
      pw8 = _mm256_set1_epi16(8);
      for (i = 0; i < height; i++) {
        LOAD_SRC_DST
        MERGE_NEXT_SRC(src_reg, src_stride)
        FILTER_SRC(filter)
        CALC_SUM_SSE_INSIDE_LOOP
        src += src_stride;
        dst += dst_stride;
      }
    }
    // x_offset = 8 and y_offset = 0
  } else if (x_offset == 8) {
    if (y_offset == 0) {
      __m256i src_next_reg;
      for (i = 0; i < height; i++) {
        LOAD_SRC_DST
        AVG_NEXT_SRC(src_reg, 1)
        // expand each byte to 2 bytes
        MERGE_WITH_SRC(src_reg, zero_reg)
        CALC_SUM_SSE_INSIDE_LOOP
        src += src_stride;
        dst += dst_stride;
      }
      // x_offset = 8 and y_offset = 8
    } else if (y_offset == 8) {
      __m256i src_next_reg, src_avg;
      // load the source and a second source shifted one byte to the
      // right, then average them horizontally
      src_reg = _mm256_loadu_si256((__m256i const *)(src));
      AVG_NEXT_SRC(src_reg, 1)
      for (i = 0; i < height; i++) {
        // save the current row's horizontal average for the next iteration
        src_avg = src_reg;
        src += src_stride;
        LOAD_SRC_DST
        AVG_NEXT_SRC(src_reg, 1)
        // average the previous row's horizontal average with the current one
        src_avg = _mm256_avg_epu8(src_avg, src_reg);
        // expand each byte to 2 bytes
        MERGE_WITH_SRC(src_avg, zero_reg)
        CALC_SUM_SSE_INSIDE_LOOP
        dst += dst_stride;
      }
      // x_offset = 8 and y_offset = bilin interpolation
    } else {
      // ...
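The x_offset == 8 && y_offset == 8 branch replaces exact 2-D half-pel bilinear filtering with two chained rounding averages: each row is averaged with its right neighbor via AVG_NEXT_SRC, then consecutive rows of those averages are averaged again with _mm256_avg_epu8. A scalar model (our sketch, with a hypothetical function name) showing why this can differ from the exact (a + b + c + d + 2) >> 2 by at most 1:

#include <stdint.h>

/* Scalar model of the half-pel-in-both-axes path: two chained rounding
   averages.  Because each stage rounds up, the result can exceed the
   exact 2x2 bilinear value (a + b + c + d + 2) >> 2 by at most 1. */
static inline uint8_t half_pel_avg2d(const uint8_t *src, int stride, int x) {
  const int row0 = (src[x] + src[x + 1] + 1) >> 1;
  const int row1 = (src[x + stride] + src[x + stride + 1] + 1) >> 1;
  return (uint8_t)((row0 + row1 + 1) >> 1);
}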