int64_t vp9_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
                             intptr_t block_size, int64_t *ssz) {
  __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
  __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
  __m256i sse_reg_64hi, ssz_reg_64hi;
  __m128i sse_reg128, ssz_reg128;
  int64_t sse;
  int i;
  const __m256i zero_reg = _mm256_set1_epi16(0);

  // init sse and ssz registers to zero
  sse_reg = _mm256_set1_epi16(0);
  ssz_reg = _mm256_set1_epi16(0);

  for (i = 0; i < block_size; i += 16) {
    // load 32 bytes from coeff and dqcoeff
    coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i));
    dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i));
    // dqcoeff - coeff
    dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg);
    // madd (dqcoeff - coeff)
    dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg);
    // madd coeff
    coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg);
    // expand each double word of madd (dqcoeff - coeff) to quad word
    exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg);
    exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg);
    // expand each double word of madd (coeff) to quad word
    exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg);
    exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg);
    // add each quad word of madd (dqcoeff - coeff) and madd (coeff)
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo);
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi);
  }

  // save the higher 64 bit of each 128 bit lane
  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);
  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);
  // add the higher 64 bit to the low 64 bit
  sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi);
  ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi);

  // add each 64 bit from each of the 128 bit lane of the 256 bit
  sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),
                             _mm256_extractf128_si256(sse_reg, 1));
  ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),
                             _mm256_extractf128_si256(ssz_reg, 1));

  // store the results
  _mm_storel_epi64((__m128i *)(&sse), sse_reg128);
  _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
  return sse;
}
static INLINE unsigned int highbd_masked_sad16xh_avx2(
    const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
    const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
    int width, int height) {
  const uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8);
  const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8);
  int x, y;
  __m256i res = _mm256_setzero_si256();
  const __m256i mask_max = _mm256_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
  const __m256i round_const =
      _mm256_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
  const __m256i one = _mm256_set1_epi16(1);

  for (y = 0; y < height; y++) {
    for (x = 0; x < width; x += 16) {
      const __m256i src = _mm256_lddqu_si256((const __m256i *)&src_ptr[x]);
      const __m256i a = _mm256_lddqu_si256((const __m256i *)&a_ptr[x]);
      const __m256i b = _mm256_lddqu_si256((const __m256i *)&b_ptr[x]);
      // Zero-extend mask to 16 bits
      const __m256i m =
          _mm256_cvtepu8_epi16(_mm_lddqu_si128((const __m128i *)&m_ptr[x]));
      const __m256i m_inv = _mm256_sub_epi16(mask_max, m);

      const __m256i data_l = _mm256_unpacklo_epi16(a, b);
      const __m256i mask_l = _mm256_unpacklo_epi16(m, m_inv);
      __m256i pred_l = _mm256_madd_epi16(data_l, mask_l);
      pred_l = _mm256_srai_epi32(_mm256_add_epi32(pred_l, round_const),
                                 AOM_BLEND_A64_ROUND_BITS);

      const __m256i data_r = _mm256_unpackhi_epi16(a, b);
      const __m256i mask_r = _mm256_unpackhi_epi16(m, m_inv);
      __m256i pred_r = _mm256_madd_epi16(data_r, mask_r);
      pred_r = _mm256_srai_epi32(_mm256_add_epi32(pred_r, round_const),
                                 AOM_BLEND_A64_ROUND_BITS);

      // Note: the maximum value in pred_l/r is (2^bd)-1 < 2^15,
      // so it is safe to do signed saturation here.
      const __m256i pred = _mm256_packs_epi32(pred_l, pred_r);
      // There is no 16-bit SAD instruction, so we have to synthesize
      // an 8-element SAD. We do this by storing 4 32-bit partial SADs,
      // and accumulating them at the end
      const __m256i diff = _mm256_abs_epi16(_mm256_sub_epi16(pred, src));
      res = _mm256_add_epi32(res, _mm256_madd_epi16(diff, one));
    }
    src_ptr += src_stride;
    a_ptr += a_stride;
    b_ptr += b_stride;
    m_ptr += m_stride;
  }
  // At this point, we have four 32-bit partial SADs stored in 'res'.
  res = _mm256_hadd_epi32(res, res);
  res = _mm256_hadd_epi32(res, res);
  int sad = _mm256_extract_epi32(res, 0) + _mm256_extract_epi32(res, 4);
  return (sad + 31) >> 6;
}
SIMD_INLINE __m256i BgraToGray32(__m256i bgra)
{
    const __m256i g0a0 = _mm256_and_si256(_mm256_srli_si256(bgra, 1), K16_00FF);
    const __m256i b0r0 = _mm256_and_si256(bgra, K16_00FF);
    const __m256i weightedSum = _mm256_add_epi32(_mm256_madd_epi16(g0a0, K16_GREEN_0000), _mm256_madd_epi16(b0r0, K16_BLUE_RED));
    return _mm256_srli_epi32(_mm256_add_epi32(weightedSum, K32_ROUND_TERM), Base::BGR_TO_GRAY_AVERAGING_SHIFT);
}
static FORCE_INLINE __m256i lookup_AVX2(const int16_t *VXFull, const int16_t *VYFull, const PixelType *pref, int w,
                                        const __m256i &dwords_time256, const __m256i &dwords_ref_pitch, const __m256i &dwords_hoffsets) {
    __m256i vx = _mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)&VXFull[w]));
    __m256i vy = _mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)&VYFull[w]));

    vx = _mm256_madd_epi16(vx, dwords_time256);
    vx = _mm256_srai_epi32(vx, 8);

    vy = _mm256_madd_epi16(vy, dwords_time256);
    vy = _mm256_srai_epi32(vy, 8);

    __m256i addr = _mm256_madd_epi16(vy, dwords_ref_pitch);
    addr = _mm256_add_epi32(addr, vx);
    addr = _mm256_add_epi32(addr, dwords_hoffsets);

    // It's okay to read two or three bytes more than needed. pref is always padded,
    // unless the user chooses a horizontal padding of 0, which would be stupid.
    __m256i gathered = _mm256_i32gather_epi32((const int *)pref, addr, sizeof(PixelType));
    gathered = _mm256_and_si256(gathered, _mm256_set1_epi32((1 << (sizeof(PixelType) * 8)) - 1));

    return gathered;
}
static INLINE void variance_kernel_avx2(const __m256i src, const __m256i ref,
                                        __m256i *const sse,
                                        __m256i *const sum) {
  const __m256i adj_sub = _mm256_set1_epi16(0xff01);  // (1,-1)

  // unpack into pairs of source and reference values
  const __m256i src_ref0 = _mm256_unpacklo_epi8(src, ref);
  const __m256i src_ref1 = _mm256_unpackhi_epi8(src, ref);

  // subtract adjacent elements using src*1 + ref*-1
  const __m256i diff0 = _mm256_maddubs_epi16(src_ref0, adj_sub);
  const __m256i diff1 = _mm256_maddubs_epi16(src_ref1, adj_sub);
  const __m256i madd0 = _mm256_madd_epi16(diff0, diff0);
  const __m256i madd1 = _mm256_madd_epi16(diff1, diff1);

  // add to the running totals
  *sum = _mm256_add_epi16(*sum, _mm256_add_epi16(diff0, diff1));
  *sse = _mm256_add_epi32(*sse, _mm256_add_epi32(madd0, madd1));
}
static INLINE __m256i highbd_comp_mask_pred_line_avx2(const __m256i s0,
                                                      const __m256i s1,
                                                      const __m256i a) {
  const __m256i alpha_max = _mm256_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
  const __m256i round_const =
      _mm256_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
  const __m256i a_inv = _mm256_sub_epi16(alpha_max, a);

  const __m256i s_lo = _mm256_unpacklo_epi16(s0, s1);
  const __m256i a_lo = _mm256_unpacklo_epi16(a, a_inv);
  const __m256i pred_lo = _mm256_madd_epi16(s_lo, a_lo);
  const __m256i pred_l = _mm256_srai_epi32(
      _mm256_add_epi32(pred_lo, round_const), AOM_BLEND_A64_ROUND_BITS);

  const __m256i s_hi = _mm256_unpackhi_epi16(s0, s1);
  const __m256i a_hi = _mm256_unpackhi_epi16(a, a_inv);
  const __m256i pred_hi = _mm256_madd_epi16(s_hi, a_hi);
  const __m256i pred_h = _mm256_srai_epi32(
      _mm256_add_epi32(pred_hi, round_const), AOM_BLEND_A64_ROUND_BITS);

  const __m256i comp = _mm256_packs_epi32(pred_l, pred_h);
  return comp;
}
static FORCE_INLINE void FlowInterSimple_double_8px_AVX2(
        int w, PixelType *pdst, const PixelType *prefB, const PixelType *prefF,
        const int16_t *VXFullB, const int16_t *VXFullF, const int16_t *VYFullB, const int16_t *VYFullF,
        const uint8_t *MaskB, const uint8_t *MaskF, int nPelLog,
        const __m256i &dwords_ref_pitch, const __m256i &dwords_hoffsets) {

    __m256i dwords_w = _mm256_add_epi32(_mm256_set1_epi32(w << nPelLog), dwords_hoffsets); /// maybe do it another way

    __m256i dstF = lookup_double_AVX2(VXFullF, VYFullF, prefF, w, dwords_ref_pitch, dwords_w);
    __m256i dstB = lookup_double_AVX2(VXFullB, VYFullB, prefB, w, dwords_ref_pitch, dwords_w);

    __m256i maskf = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)&MaskF[w]));
    __m256i maskb = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)&MaskB[w]));

    __m256i dstF_dstB = _mm256_add_epi32(dstF, dstB);
    dstF_dstB = _mm256_slli_epi32(dstF_dstB, 8);

    __m256i dst;
    if (sizeof(PixelType) == 1) {
        __m256i dstB_dstF = _mm256_sub_epi16(dstB, dstF);
        __m256i maskf_maskb = _mm256_sub_epi16(maskf, maskb);
        dst = _mm256_madd_epi16(dstB_dstF, maskf_maskb);
    } else {
        __m256i dstB_dstF = _mm256_sub_epi32(dstB, dstF);
        __m256i maskf_maskb = _mm256_sub_epi32(maskf, maskb);
        dst = _mm256_mullo_epi32(dstB_dstF, maskf_maskb);
    }

    dst = _mm256_add_epi32(dst, dstF_dstB);
    dst = _mm256_srai_epi32(dst, 9);

    dst = _mm256_packus_epi32(dst, dst);
    dst = _mm256_permute4x64_epi64(dst, 0xe8); // 0b11101000 - copy third qword to second qword

    __m128i dst128 = _mm256_castsi256_si128(dst);

    if (sizeof(PixelType) == 1) {
        dst128 = _mm_packus_epi16(dst128, dst128);
        _mm_storel_epi64((__m128i *)&pdst[w], dst128);
    } else {
        _mm_storeu_si128((__m128i *)&pdst[w], dst128);
    }
}
int vpx_satd_avx2(const tran_low_t *coeff, int length) {
  const __m256i one = _mm256_set1_epi16(1);
  __m256i accum = _mm256_setzero_si256();
  int i;

  for (i = 0; i < length; i += 16) {
    const __m256i src_line = load_tran_low(coeff);
    const __m256i abs = _mm256_abs_epi16(src_line);
    const __m256i sum = _mm256_madd_epi16(abs, one);
    accum = _mm256_add_epi32(accum, sum);
    coeff += 16;
  }

  {  // 32 bit horizontal add
    const __m256i a = _mm256_srli_si256(accum, 8);
    const __m256i b = _mm256_add_epi32(accum, a);
    const __m256i c = _mm256_srli_epi64(b, 32);
    const __m256i d = _mm256_add_epi32(b, c);
    const __m128i accum_128 = _mm_add_epi32(_mm256_castsi256_si128(d),
                                            _mm256_extractf128_si256(d, 1));
    return _mm_cvtsi128_si32(accum_128);
  }
}
static FORCE_INLINE void FlowInterExtra_8px_AVX2(
        int w, PixelType *pdst, const PixelType *prefB, const PixelType *prefF,
        const int16_t *VXFullB, const int16_t *VXFullF, const int16_t *VYFullB, const int16_t *VYFullF,
        const uint8_t *MaskB, const uint8_t *MaskF, int nPelLog,
        const int16_t *VXFullBB, const int16_t *VXFullFF, const int16_t *VYFullBB, const int16_t *VYFullFF,
        const __m256i &dwords_time256, const __m256i &dwords_256_time256,
        const __m256i &dwords_ref_pitch, const __m256i &dwords_hoffsets) {

    __m256i dwords_w = _mm256_add_epi32(_mm256_set1_epi32(w << nPelLog), dwords_hoffsets);

    __m256i dstF = lookup_AVX2(VXFullF, VYFullF, prefF, w, dwords_time256, dwords_ref_pitch, dwords_w);
    __m256i dstB = lookup_AVX2(VXFullB, VYFullB, prefB, w, dwords_256_time256, dwords_ref_pitch, dwords_w);
    __m256i dstFF = lookup_AVX2(VXFullFF, VYFullFF, prefF, w, dwords_time256, dwords_ref_pitch, dwords_w);
    __m256i dstBB = lookup_AVX2(VXFullBB, VYFullBB, prefB, w, dwords_256_time256, dwords_ref_pitch, dwords_w);

    __m256i minfb = mm256_min_epu<PixelType>(dstF, dstB);
    __m256i maxfb = mm256_max_epu<PixelType>(dstF, dstB);

    __m256i medianBB = mm256_max_epu<PixelType>(minfb, mm256_min_epu<PixelType>(maxfb, dstBB));
    __m256i medianFF = mm256_max_epu<PixelType>(minfb, mm256_min_epu<PixelType>(maxfb, dstFF));

    __m256i maskf = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)&MaskF[w]));
    __m256i maskb = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)&MaskB[w]));

    const __m256i dwords_255 = _mm256_set1_epi32(255);

    __m256i maskf_inv = _mm256_sub_epi32(dwords_255, maskf);
    __m256i maskb_inv = _mm256_sub_epi32(dwords_255, maskb);

    if (sizeof(PixelType) == 1) {
        dstF = _mm256_mullo_epi16(dstF, maskf_inv);
        dstB = _mm256_mullo_epi16(dstB, maskb_inv);
        medianBB = _mm256_mullo_epi16(medianBB, maskf);
        medianFF = _mm256_mullo_epi16(medianFF, maskb);
    } else {
        dstF = _mm256_mullo_epi32(dstF, maskf_inv);
        dstB = _mm256_mullo_epi32(dstB, maskb_inv);
        medianBB = _mm256_mullo_epi32(medianBB, maskf);
        medianFF = _mm256_mullo_epi32(medianFF, maskb);
    }

    dstF = _mm256_add_epi32(dstF, dwords_255);
    dstB = _mm256_add_epi32(dstB, dwords_255);

    dstF = _mm256_add_epi32(dstF, medianBB);
    dstB = _mm256_add_epi32(dstB, medianFF);

    dstF = _mm256_srai_epi32(dstF, 8);
    dstB = _mm256_srai_epi32(dstB, 8);

    if (sizeof(PixelType) == 2) {
        dstF = _mm256_sub_epi16(dstF, _mm256_set1_epi32(32768));
        dstB = _mm256_sub_epi16(dstB, _mm256_set1_epi32(32768));
    }

    dstF = _mm256_madd_epi16(dstF, dwords_256_time256);
    dstB = _mm256_madd_epi16(dstB, dwords_time256);

    if (sizeof(PixelType) == 2) {
        // dstF = _mm256_add_epi32(dstF, _mm256_slli_epi32(dwords_256_time256, 15));
        // dstB = _mm256_add_epi32(dstB, _mm256_slli_epi32(dwords_time256, 15));
        // Knowing that they add up to 256, the two additions can be combined.
        dstF = _mm256_add_epi32(dstF, _mm256_set1_epi32(256 << 15));
    }

    __m256i dst = _mm256_add_epi32(dstF, dstB);
    dst = _mm256_srai_epi32(dst, 8);

    dst = _mm256_packus_epi32(dst, dst);
    dst = _mm256_permute4x64_epi64(dst, 0xe8); // 0b11101000 - copy third qword to second qword

    __m128i dst128 = _mm256_castsi256_si128(dst);

    if (sizeof(PixelType) == 1) {
        dst128 = _mm_packus_epi16(dst128, dst128);
        _mm_storel_epi64((__m128i *)&pdst[w], dst128);
    } else {
        _mm_storeu_si128((__m128i *)&pdst[w], dst128);
    }
}
static FORCE_INLINE void FlowInter_8px_AVX2(
        int w, PixelType *pdst, const PixelType *prefB, const PixelType *prefF,
        const int16_t *VXFullB, const int16_t *VXFullF, const int16_t *VYFullB, const int16_t *VYFullF,
        const uint8_t *MaskB, const uint8_t *MaskF, int nPelLog,
        const __m256i &dwords_time256, const __m256i &dwords_256_time256,
        const __m256i &dwords_ref_pitch, const __m256i &dwords_hoffsets) {

    __m256i dwords_w = _mm256_add_epi32(_mm256_set1_epi32(w << nPelLog), dwords_hoffsets);

    __m256i dstF = lookup_AVX2(VXFullF, VYFullF, prefF, w, dwords_time256, dwords_ref_pitch, dwords_w);
    __m256i dstB = lookup_AVX2(VXFullB, VYFullB, prefB, w, dwords_256_time256, dwords_ref_pitch, dwords_w);

    __m256i dstF0 = _mm256_i32gather_epi32((const int *)prefF, dwords_w, sizeof(PixelType));
    __m256i dstB0 = _mm256_i32gather_epi32((const int *)prefB, dwords_w, sizeof(PixelType));
    dstF0 = _mm256_and_si256(dstF0, _mm256_set1_epi32((1 << (sizeof(PixelType) * 8)) - 1));
    dstB0 = _mm256_and_si256(dstB0, _mm256_set1_epi32((1 << (sizeof(PixelType) * 8)) - 1));

    __m256i maskf = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)&MaskF[w]));
    __m256i maskb = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)&MaskB[w]));

    const __m256i dwords_255 = _mm256_set1_epi32(255);

    __m256i maskf_inv = _mm256_sub_epi32(dwords_255, maskf);
    __m256i maskb_inv = _mm256_sub_epi32(dwords_255, maskb);

    __m256i dstF_maskf_inv, dstB_maskb_inv, dstF0_maskb, dstB0_maskf;

    if (sizeof(PixelType) == 1) {
        dstF_maskf_inv = _mm256_mullo_epi16(dstF, maskf_inv);
        dstB_maskb_inv = _mm256_mullo_epi16(dstB, maskb_inv);
        dstF0_maskb = _mm256_mullo_epi16(dstF0, maskb);
        dstB0_maskf = _mm256_mullo_epi16(dstB0, maskf);
    } else {
        dstF_maskf_inv = _mm256_mullo_epi32(dstF, maskf_inv);
        dstB_maskb_inv = _mm256_mullo_epi32(dstB, maskb_inv);
        dstF0_maskb = _mm256_mullo_epi32(dstF0, maskb);
        dstB0_maskf = _mm256_mullo_epi32(dstB0, maskf);
    }

    __m256i f = _mm256_add_epi32(dstF0_maskb, dstB_maskb_inv);
    __m256i b = _mm256_add_epi32(dstB0_maskf, dstF_maskf_inv);

    if (sizeof(PixelType) == 1) {
        f = _mm256_mullo_epi32(f, maskf);
        b = _mm256_mullo_epi32(b, maskb);

        f = _mm256_add_epi32(f, dwords_255);
        b = _mm256_add_epi32(b, dwords_255);

        f = _mm256_srai_epi32(f, 8);
        b = _mm256_srai_epi32(b, 8);
    } else {
        const __m256i qwords_255 = _mm256_set1_epi64x(255);

        __m256i tempf = _mm256_mul_epu32(f, maskf);
        __m256i tempb = _mm256_mul_epu32(b, maskb);
        tempf = _mm256_add_epi64(tempf, qwords_255);
        tempb = _mm256_add_epi64(tempb, qwords_255);
        tempf = _mm256_srli_epi64(tempf, 8);
        tempb = _mm256_srli_epi64(tempb, 8);

        f = _mm256_srli_epi64(f, 32);
        b = _mm256_srli_epi64(b, 32);
        f = _mm256_mul_epu32(f, _mm256_srli_epi64(maskf, 32));
        b = _mm256_mul_epu32(b, _mm256_srli_epi64(maskb, 32));
        f = _mm256_add_epi64(f, qwords_255);
        b = _mm256_add_epi64(b, qwords_255);
        f = _mm256_srli_epi64(f, 8);
        b = _mm256_srli_epi64(b, 8);

        f = _mm256_or_si256(tempf, _mm256_slli_epi64(f, 32));
        b = _mm256_or_si256(tempb, _mm256_slli_epi64(b, 32));
    }

    f = _mm256_add_epi32(f, dstF_maskf_inv);
    b = _mm256_add_epi32(b, dstB_maskb_inv);

    f = _mm256_add_epi32(f, dwords_255);
    b = _mm256_add_epi32(b, dwords_255);

    f = _mm256_srai_epi32(f, 8);
    b = _mm256_srai_epi32(b, 8);

    if (sizeof(PixelType) == 1) {
        f = _mm256_madd_epi16(f, dwords_256_time256);
        b = _mm256_madd_epi16(b, dwords_time256);
    } else {
        f = _mm256_mullo_epi32(f, dwords_256_time256);
        b = _mm256_mullo_epi32(b, dwords_time256);
    }

    __m256i dst = _mm256_add_epi32(f, b);
    dst = _mm256_srai_epi32(dst, 8);

    dst = _mm256_packus_epi32(dst, dst);
    dst = _mm256_permute4x64_epi64(dst, 0xe8); // 0b11101000 - copy third qword to second qword

    __m128i dst128 = _mm256_castsi256_si128(dst);

    if (sizeof(PixelType) == 1) {
        dst128 = _mm_packus_epi16(dst128, dst128);
        _mm_storel_epi64((__m128i *)&pdst[w], dst128);
    } else {
        _mm_storeu_si128((__m128i *)&pdst[w], dst128);
    }
}
static FORCE_INLINE void FlowInterSimple_generic_8px_AVX2(
        int w, PixelType *pdst, const PixelType *prefB, const PixelType *prefF,
        const int16_t *VXFullB, const int16_t *VXFullF, const int16_t *VYFullB, const int16_t *VYFullF,
        const uint8_t *MaskB, const uint8_t *MaskF, int nPelLog,
        const __m256i &dwords_time256, const __m256i &dwords_256_time256,
        const __m256i &dwords_ref_pitch, const __m256i &dwords_hoffsets) {

    __m256i dwords_w = _mm256_add_epi32(_mm256_set1_epi32(w << nPelLog), dwords_hoffsets);

    __m256i dstF = lookup_AVX2(VXFullF, VYFullF, prefF, w, dwords_time256, dwords_ref_pitch, dwords_w);
    __m256i dstB = lookup_AVX2(VXFullB, VYFullB, prefB, w, dwords_256_time256, dwords_ref_pitch, dwords_w);

    __m256i maskf = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)&MaskF[w]));
    __m256i maskb = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)&MaskB[w]));

    const __m256i dwords_255 = _mm256_set1_epi32(255);

    __m256i maskf_inv = _mm256_sub_epi32(dwords_255, maskf);
    __m256i maskb_inv = _mm256_sub_epi32(dwords_255, maskb);

    __m256i f, b;

    if (sizeof(PixelType) == 1) {
        __m256i dstF_dstB = _mm256_or_si256(dstF, _mm256_slli_epi32(dstB, 16));

        maskf = _mm256_or_si256(_mm256_slli_epi32(maskf, 16), maskf_inv);
        maskb = _mm256_or_si256(maskb, _mm256_slli_epi32(maskb_inv, 16));

        f = _mm256_madd_epi16(dstF_dstB, maskf);
        b = _mm256_madd_epi16(dstF_dstB, maskb);
    } else {
        __m256i dstF_maskf_inv = _mm256_mullo_epi32(dstF, maskf_inv);
        __m256i dstB_maskb_inv = _mm256_mullo_epi32(dstB, maskb_inv);

        __m256i dstB_maskf = _mm256_mullo_epi32(dstB, maskf);
        __m256i dstF_maskb = _mm256_mullo_epi32(dstF, maskb);

        f = _mm256_add_epi32(dstF_maskf_inv, dstB_maskf);
        b = _mm256_add_epi32(dstB_maskb_inv, dstF_maskb);
    }

    f = _mm256_add_epi32(f, dwords_255);
    b = _mm256_add_epi32(b, dwords_255);

    f = _mm256_srai_epi32(f, 8);
    b = _mm256_srai_epi32(b, 8);

    if (sizeof(PixelType) == 1) {
        f = _mm256_madd_epi16(f, dwords_256_time256);
        b = _mm256_madd_epi16(b, dwords_time256);
    } else {
        f = _mm256_mullo_epi32(f, dwords_256_time256);
        b = _mm256_mullo_epi32(b, dwords_time256);
    }

    __m256i dst = _mm256_add_epi32(f, b);
    dst = _mm256_srai_epi32(dst, 8);

    dst = _mm256_packus_epi32(dst, dst);
    dst = _mm256_permute4x64_epi64(dst, 0xe8); // 0b11101000 - copy third qword to second qword

    __m128i dst128 = _mm256_castsi256_si128(dst);

    if (sizeof(PixelType) == 1) {
        dst128 = _mm_packus_epi16(dst128, dst128);
        _mm_storel_epi64((__m128i *)&pdst[w], dst128);
    } else {
        _mm_storeu_si128((__m128i *)&pdst[w], dst128);
    }
}
SIMD_INLINE __m256i SquaredDifference(__m256i a, __m256i b)
{
    const __m256i lo = SubUnpackedU8<0>(a, b);
    const __m256i hi = SubUnpackedU8<1>(a, b);
    return _mm256_add_epi32(_mm256_madd_epi16(lo, lo), _mm256_madd_epi16(hi, hi));
}
/**
 * \brief quantize transformed coefficients
 *
 */
void kvz_quant_flat_avx2(const encoder_state_t * const state, coeff_t *coef, coeff_t *q_coef, int32_t width,
                         int32_t height, int8_t type, int8_t scan_idx, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
  const uint32_t * const scan = kvz_g_sig_last_scan[scan_idx][log2_block_size - 1];

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->global->QP, (encoder->bitdepth - 8) * 6);
  const uint32_t log2_tr_size = kvz_g_convert_to_bit[width] + 2;
  const int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
  const int32_t *quant_coeff = encoder->scaling_list.quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
  const int32_t transform_shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size; //!< Represents scaling through forward transform
  const int32_t q_bits = QUANT_SHIFT + qp_scaled / 6 + transform_shift;
  const int32_t add = ((state->global->slicetype == KVZ_SLICE_I) ? 171 : 85) << (q_bits - 9);
  const int32_t q_bits8 = q_bits - 8;

  assert(quant_coeff[0] <= (1 << 15) - 1 && quant_coeff[0] >= -(1 << 15)); // Assuming flat values to fit int16_t

  uint32_t ac_sum = 0;

  __m256i v_ac_sum = _mm256_setzero_si256();
  __m256i v_quant_coeff = _mm256_set1_epi16(quant_coeff[0]);

  for (int32_t n = 0; n < width * height; n += 16) {
    __m256i v_level = _mm256_loadu_si256((__m256i*)&(coef[n]));
    __m256i v_sign = _mm256_cmpgt_epi16(_mm256_setzero_si256(), v_level);
    v_sign = _mm256_or_si256(v_sign, _mm256_set1_epi16(1));

    v_level = _mm256_abs_epi16(v_level);
    __m256i low_a = _mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0));
    __m256i high_a = _mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0));
    __m256i low_b = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i high_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));

    __m256i v_level32_a = _mm256_madd_epi16(low_a, low_b);
    __m256i v_level32_b = _mm256_madd_epi16(high_a, high_b);

    v_level32_a = _mm256_add_epi32(v_level32_a, _mm256_set1_epi32(add));
    v_level32_b = _mm256_add_epi32(v_level32_b, _mm256_set1_epi32(add));

    v_level32_a = _mm256_srai_epi32(v_level32_a, q_bits);
    v_level32_b = _mm256_srai_epi32(v_level32_b, q_bits);

    v_level = _mm256_packs_epi32(v_level32_a, v_level32_b);
    v_level = _mm256_sign_epi16(v_level, v_sign);

    _mm256_storeu_si256((__m256i*)&(q_coef[n]), v_level);

    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_a);
    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_b);
  }

  __m128i temp = _mm_add_epi32(_mm256_castsi256_si128(v_ac_sum), _mm256_extracti128_si256(v_ac_sum, 1));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, KVZ_PERMUTE(2, 3, 0, 1)));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, KVZ_PERMUTE(1, 0, 1, 0)));
  ac_sum += _mm_cvtsi128_si32(temp);

  if (!(encoder->sign_hiding && ac_sum >= 2)) return;

  int32_t delta_u[LCU_WIDTH*LCU_WIDTH >> 2];

  for (int32_t n = 0; n < width * height; n += 16) {
    __m256i v_level = _mm256_loadu_si256((__m256i*)&(coef[n]));

    v_level = _mm256_abs_epi16(v_level);
    __m256i low_a = _mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0));
    __m256i high_a = _mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0));
    __m256i low_b = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i high_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));

    __m256i v_level32_a = _mm256_madd_epi16(low_a, low_b);
    __m256i v_level32_b = _mm256_madd_epi16(high_a, high_b);

    v_level32_a = _mm256_add_epi32(v_level32_a, _mm256_set1_epi32(add));
    v_level32_b = _mm256_add_epi32(v_level32_b, _mm256_set1_epi32(add));

    v_level32_a = _mm256_srai_epi32(v_level32_a, q_bits);
    v_level32_b = _mm256_srai_epi32(v_level32_b, q_bits);

    v_level = _mm256_packs_epi32(v_level32_a, v_level32_b);

    __m256i v_coef = _mm256_loadu_si256((__m256i*)&(coef[n]));
    __m256i v_coef_a = _mm256_unpacklo_epi16(_mm256_abs_epi16(v_coef), _mm256_set1_epi16(0));
    __m256i v_coef_b = _mm256_unpackhi_epi16(_mm256_abs_epi16(v_coef), _mm256_set1_epi16(0));
    __m256i v_quant_coeff_a = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i v_quant_coeff_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    v_coef_a = _mm256_madd_epi16(v_coef_a, v_quant_coeff_a);
    v_coef_b = _mm256_madd_epi16(v_coef_b, v_quant_coeff_b);
    v_coef_a = _mm256_sub_epi32(v_coef_a, _mm256_slli_epi32(_mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0)), q_bits));
    v_coef_b = _mm256_sub_epi32(v_coef_b, _mm256_slli_epi32(_mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0)), q_bits));
    v_coef_a = _mm256_srai_epi32(v_coef_a, q_bits8);
    v_coef_b = _mm256_srai_epi32(v_coef_b, q_bits8);

    _mm_storeu_si128((__m128i*)&(delta_u[n + 0 * 4]), _mm256_castsi256_si128(v_coef_a));
    _mm_storeu_si128((__m128i*)&(delta_u[n + 2 * 4]), _mm256_extracti128_si256(v_coef_a, 1));
    _mm_storeu_si128((__m128i*)&(delta_u[n + 1 * 4]), _mm256_castsi256_si128(v_coef_b));
    _mm_storeu_si128((__m128i*)&(delta_u[n + 3 * 4]), _mm256_extracti128_si256(v_coef_b, 1));
  }

  if (ac_sum >= 2) {
#define SCAN_SET_SIZE 16
#define LOG2_SCAN_SET_SIZE 4
    int32_t n, last_cg = -1, abssum = 0, subset, subpos;
    for (subset = (width*height - 1) >> LOG2_SCAN_SET_SIZE; subset >= 0; subset--) {
      int32_t first_nz_pos_in_cg = SCAN_SET_SIZE, last_nz_pos_in_cg = -1;
      subpos = subset << LOG2_SCAN_SET_SIZE;
      abssum = 0;

      // Find last coeff pos
      for (n = SCAN_SET_SIZE - 1; n >= 0; n--) {
        if (q_coef[scan[n + subpos]]) {
          last_nz_pos_in_cg = n;
          break;
        }
      }

      // First coeff pos
      for (n = 0; n < SCAN_SET_SIZE; n++) {
        if (q_coef[scan[n + subpos]]) {
          first_nz_pos_in_cg = n;
          break;
        }
      }

      // Sum all kvz_quant coeffs between first and last
      for (n = first_nz_pos_in_cg; n <= last_nz_pos_in_cg; n++) {
        abssum += q_coef[scan[n + subpos]];
      }

      if (last_nz_pos_in_cg >= 0 && last_cg == -1) {
        last_cg = 1;
      }

      if (last_nz_pos_in_cg - first_nz_pos_in_cg >= 4) {
        int32_t signbit = (q_coef[scan[subpos + first_nz_pos_in_cg]] > 0 ? 0 : 1);
        if (signbit != (abssum & 0x1)) { // compare signbit with sum_parity
          int32_t min_cost_inc = 0x7fffffff, min_pos = -1, cur_cost = 0x7fffffff;
          int16_t final_change = 0, cur_change = 0;

          for (n = (last_cg == 1 ? last_nz_pos_in_cg : SCAN_SET_SIZE - 1); n >= 0; n--) {
            uint32_t blkPos = scan[n + subpos];
            if (q_coef[blkPos] != 0) {
              if (delta_u[blkPos] > 0) {
                cur_cost = -delta_u[blkPos];
                cur_change = 1;
              } else if (n == first_nz_pos_in_cg && abs(q_coef[blkPos]) == 1) {
                cur_cost = 0x7fffffff;
              } else {
                cur_cost = delta_u[blkPos];
                cur_change = -1;
              }
            } else if (n < first_nz_pos_in_cg && ((coef[blkPos] >= 0) ? 0 : 1) != signbit) {
              cur_cost = 0x7fffffff;
            } else {
              cur_cost = -delta_u[blkPos];
              cur_change = 1;
            }

            if (cur_cost < min_cost_inc) {
              min_cost_inc = cur_cost;
              final_change = cur_change;
              min_pos = blkPos;
            }
          } // CG loop

          if (q_coef[min_pos] == 32767 || q_coef[min_pos] == -32768) {
            final_change = -1;
          }

          if (coef[min_pos] >= 0) q_coef[min_pos] += final_change;
          else q_coef[min_pos] -= final_change;
        } // Hide
      }
      if (last_cg == 1) last_cg = 0;
    }
#undef SCAN_SET_SIZE
#undef LOG2_SCAN_SET_SIZE
  }
}
void extern
avx2_test (void)
{
  x = _mm256_madd_epi16 (x, x);
}
__m256i test_mm256_madd_epi16(__m256i a, __m256i b) {
  // CHECK-LABEL: test_mm256_madd_epi16
  // CHECK: call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
  return _mm256_madd_epi16(a, b);
}
__m256i test_mm256_madd_epi16(__m256i a, __m256i b) {
  // CHECK: @llvm.x86.avx2.pmadd.wd
  return _mm256_madd_epi16(a, b);
}
INLINE static void haddwd_accumulate_dual_avx2(__m256i *accumulate, __m256i *ver_row)
{
  __m256i abs_value = _mm256_abs_epi16(*ver_row);
  *accumulate = _mm256_add_epi32(*accumulate, _mm256_madd_epi16(abs_value, _mm256_set1_epi16(1)));
}