static INLINE __m256i calc_mask_avx2(const __m256i mask_base, const __m256i s0,
                                     const __m256i s1) {
  const __m256i diff = _mm256_abs_epi16(_mm256_sub_epi16(s0, s1));
  return _mm256_abs_epi16(
      _mm256_add_epi16(mask_base, _mm256_srli_epi16(diff, 4)));
  // clamp(diff, 0, 64) can be skipped since diff is always in the range (38, 54).
}
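// A minimal scalar model of the per-lane arithmetic above, for reference only.
// This helper is hypothetical (not part of the source); it assumes 8-bit
// sample values, so mask_base + (|s0 - s1| >> 4) already lies inside [0, 64]
// and the clamp really can be skipped, as the comment above notes.
#include <stdlib.h>

static inline int calc_mask_scalar(int mask_base, int s0, int s1) {
  int diff = abs(s0 - s1) >> 4;  // same shift as _mm256_srli_epi16(diff, 4)
  return mask_base + diff;       // stays within (38, 54) for mask_base == 38
}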
static void satd_8bit_4x4_dual_avx2(
  const pred_buffer preds, const kvz_pixel * const orig, unsigned num_modes, unsigned *satds_out)
{
  __m256i original = _mm256_broadcastsi128_si256(_mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)orig)));
  __m256i pred = _mm256_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)preds[0]));
  pred = _mm256_inserti128_si256(pred, _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)preds[1])), 1);

  __m256i diff_lo = _mm256_sub_epi16(pred, original);

  original = _mm256_broadcastsi128_si256(_mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)(orig + 8))));
  pred = _mm256_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)(preds[0] + 8)));
  pred = _mm256_inserti128_si256(pred, _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)(preds[1] + 8))), 1);

  __m256i diff_hi = _mm256_sub_epi16(pred, original);

  //Hor
  __m256i row0 = _mm256_hadd_epi16(diff_lo, diff_hi);
  __m256i row1 = _mm256_hsub_epi16(diff_lo, diff_hi);

  __m256i row2 = _mm256_hadd_epi16(row0, row1);
  __m256i row3 = _mm256_hsub_epi16(row0, row1);

  //Ver
  row0 = _mm256_hadd_epi16(row2, row3);
  row1 = _mm256_hsub_epi16(row2, row3);

  row2 = _mm256_hadd_epi16(row0, row1);
  row3 = _mm256_hsub_epi16(row0, row1);

  //Abs and sum
  row2 = _mm256_abs_epi16(row2);
  row3 = _mm256_abs_epi16(row3);

  row3 = _mm256_add_epi16(row2, row3);

  row3 = _mm256_add_epi16(row3, _mm256_shuffle_epi32(row3, KVZ_PERMUTE(2, 3, 0, 1)));
  row3 = _mm256_add_epi16(row3, _mm256_shuffle_epi32(row3, KVZ_PERMUTE(1, 0, 1, 0)));
  row3 = _mm256_add_epi16(row3, _mm256_shufflelo_epi16(row3, KVZ_PERMUTE(1, 0, 1, 0)));

  unsigned sum1 = _mm_extract_epi16(_mm256_castsi256_si128(row3), 0);
  sum1 = (sum1 + 1) >> 1;

  unsigned sum2 = _mm_extract_epi16(_mm256_extracti128_si256(row3, 1), 0);
  sum2 = (sum2 + 1) >> 1;

  satds_out[0] = sum1;
  satds_out[1] = sum2;
}
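// For reference, a plain-C sketch of the 4x4 SATD that the function above
// computes for each of the two predictions at once. The helper is
// hypothetical; the AVX2 version performs the same Hadamard butterflies with
// hadd/hsub on both 128-bit lanes in parallel, then applies the identical
// (sum + 1) >> 1 rounding.
#include <stdint.h>
#include <stdlib.h>

static unsigned satd_4x4_scalar(const int16_t diff[16]) {
  int tmp[4][4];
  // Horizontal 4-point Hadamard transform of each row.
  for (int i = 0; i < 4; ++i) {
    int s01 = diff[i * 4 + 0] + diff[i * 4 + 1];
    int d01 = diff[i * 4 + 0] - diff[i * 4 + 1];
    int s23 = diff[i * 4 + 2] + diff[i * 4 + 3];
    int d23 = diff[i * 4 + 2] - diff[i * 4 + 3];
    tmp[i][0] = s01 + s23;
    tmp[i][1] = s01 - s23;
    tmp[i][2] = d01 + d23;
    tmp[i][3] = d01 - d23;
  }
  // Vertical transform, then absolute-value accumulation.
  unsigned satd = 0;
  for (int j = 0; j < 4; ++j) {
    int s01 = tmp[0][j] + tmp[1][j];
    int d01 = tmp[0][j] - tmp[1][j];
    int s23 = tmp[2][j] + tmp[3][j];
    int d23 = tmp[2][j] - tmp[3][j];
    satd += abs(s01 + s23) + abs(s01 - s23) + abs(d01 + d23) + abs(d01 - d23);
  }
  return (satd + 1) >> 1;
}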
static INLINE unsigned int highbd_masked_sad16xh_avx2(
    const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
    const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
    int width, int height) {
  const uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8);
  const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8);
  int x, y;
  __m256i res = _mm256_setzero_si256();
  const __m256i mask_max = _mm256_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
  const __m256i round_const =
      _mm256_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
  const __m256i one = _mm256_set1_epi16(1);

  for (y = 0; y < height; y++) {
    for (x = 0; x < width; x += 16) {
      const __m256i src = _mm256_lddqu_si256((const __m256i *)&src_ptr[x]);
      const __m256i a = _mm256_lddqu_si256((const __m256i *)&a_ptr[x]);
      const __m256i b = _mm256_lddqu_si256((const __m256i *)&b_ptr[x]);
      // Zero-extend mask to 16 bits
      const __m256i m =
          _mm256_cvtepu8_epi16(_mm_lddqu_si128((const __m128i *)&m_ptr[x]));
      const __m256i m_inv = _mm256_sub_epi16(mask_max, m);

      const __m256i data_l = _mm256_unpacklo_epi16(a, b);
      const __m256i mask_l = _mm256_unpacklo_epi16(m, m_inv);
      __m256i pred_l = _mm256_madd_epi16(data_l, mask_l);
      pred_l = _mm256_srai_epi32(_mm256_add_epi32(pred_l, round_const),
                                 AOM_BLEND_A64_ROUND_BITS);

      const __m256i data_r = _mm256_unpackhi_epi16(a, b);
      const __m256i mask_r = _mm256_unpackhi_epi16(m, m_inv);
      __m256i pred_r = _mm256_madd_epi16(data_r, mask_r);
      pred_r = _mm256_srai_epi32(_mm256_add_epi32(pred_r, round_const),
                                 AOM_BLEND_A64_ROUND_BITS);

      // Note: the maximum value in pred_l/r is (2^bd)-1 < 2^15,
      // so it is safe to do signed saturation here.
      const __m256i pred = _mm256_packs_epi32(pred_l, pred_r);
      // There is no 16-bit SAD instruction, so we have to synthesize
      // an 8-element SAD. We do this by storing 4 32-bit partial SADs,
      // and accumulating them at the end
      const __m256i diff = _mm256_abs_epi16(_mm256_sub_epi16(pred, src));
      res = _mm256_add_epi32(res, _mm256_madd_epi16(diff, one));
    }
    src_ptr += src_stride;
    a_ptr += a_stride;
    b_ptr += b_stride;
    m_ptr += m_stride;
  }
  // At this point, we have four 32-bit partial SADs stored in 'res'.
  res = _mm256_hadd_epi32(res, res);
  res = _mm256_hadd_epi32(res, res);
  int sad = _mm256_extract_epi32(res, 0) + _mm256_extract_epi32(res, 4);
  return (sad + 31) >> 6;
}
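// The inline comment above refers to a common idiom worth isolating: there is
// no 16-bit SAD instruction, but _mm256_madd_epi16 against a vector of ones
// sums adjacent 16-bit |diff| lanes into eight 32-bit partial sums in a
// single instruction. A hedged standalone sketch (hypothetical helper, not
// part of the source):
#include <immintrin.h>

static inline __m256i sum_abs_diff_epi16(__m256i a, __m256i b) {
  const __m256i one = _mm256_set1_epi16(1);
  const __m256i diff = _mm256_abs_epi16(_mm256_sub_epi16(a, b));
  // madd multiplies each lane by 1 and adds adjacent pairs: 16x epi16 -> 8x epi32.
  return _mm256_madd_epi16(diff, one);
}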
int vpx_satd_avx2(const tran_low_t *coeff, int length) {
  const __m256i one = _mm256_set1_epi16(1);
  __m256i accum = _mm256_setzero_si256();
  int i;

  for (i = 0; i < length; i += 16) {
    const __m256i src_line = load_tran_low(coeff);
    const __m256i abs = _mm256_abs_epi16(src_line);
    const __m256i sum = _mm256_madd_epi16(abs, one);
    accum = _mm256_add_epi32(accum, sum);
    coeff += 16;
  }

  {  // 32 bit horizontal add
    const __m256i a = _mm256_srli_si256(accum, 8);
    const __m256i b = _mm256_add_epi32(accum, a);
    const __m256i c = _mm256_srli_epi64(b, 32);
    const __m256i d = _mm256_add_epi32(b, c);
    const __m128i accum_128 = _mm_add_epi32(_mm256_castsi256_si128(d),
                                            _mm256_extractf128_si256(d, 1));
    return _mm_cvtsi128_si32(accum_128);
  }
}
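// Scalar reference for the function above: the "SATD" of a coefficient block
// here is simply the sum of absolute values, since the transform has already
// been applied. A hedged sketch assuming 16-bit coefficient storage
// (load_tran_low in the AVX2 version handles the tran_low_t representation;
// libvpx's canonical C fallback is vpx_satd_c):
#include <stdint.h>
#include <stdlib.h>

static int satd_scalar(const int16_t *coeff, int length) {
  int sum = 0;
  for (int i = 0; i < length; ++i) sum += abs(coeff[i]);
  return sum;
}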
void av1_build_compound_diffwtd_mask_highbd_avx2(
    uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const uint8_t *src0,
    int src0_stride, const uint8_t *src1, int src1_stride, int h, int w,
    int bd) {
  if (w < 16) {
    av1_build_compound_diffwtd_mask_highbd_ssse3(
        mask, mask_type, src0, src0_stride, src1, src1_stride, h, w, bd);
  } else {
    assert(mask_type == DIFFWTD_38 || mask_type == DIFFWTD_38_INV);
    assert(bd >= 8);
    assert((w % 16) == 0);
    const __m256i y0 = _mm256_setzero_si256();
    const __m256i yAOM_BLEND_A64_MAX_ALPHA =
        _mm256_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
    const int mask_base = 38;
    const __m256i ymask_base = _mm256_set1_epi16(mask_base);
    const uint16_t *ssrc0 = CONVERT_TO_SHORTPTR(src0);
    const uint16_t *ssrc1 = CONVERT_TO_SHORTPTR(src1);
    if (bd == 8) {
      if (mask_type == DIFFWTD_38_INV) {
        for (int i = 0; i < h; ++i) {
          for (int j = 0; j < w; j += 16) {
            __m256i s0 = _mm256_loadu_si256((const __m256i *)&ssrc0[j]);
            __m256i s1 = _mm256_loadu_si256((const __m256i *)&ssrc1[j]);
            __m256i diff = _mm256_srai_epi16(
                _mm256_abs_epi16(_mm256_sub_epi16(s0, s1)), DIFF_FACTOR_LOG2);
            __m256i m = _mm256_min_epi16(
                _mm256_max_epi16(y0, _mm256_add_epi16(diff, ymask_base)),
                yAOM_BLEND_A64_MAX_ALPHA);
            m = _mm256_sub_epi16(yAOM_BLEND_A64_MAX_ALPHA, m);
            m = _mm256_packus_epi16(m, m);
            m = _mm256_permute4x64_epi64(m, _MM_SHUFFLE(0, 0, 2, 0));
            __m128i m0 = _mm256_castsi256_si128(m);
            _mm_storeu_si128((__m128i *)&mask[j], m0);
          }
          ssrc0 += src0_stride;
          ssrc1 += src1_stride;
          mask += w;
        }
      } else {
        for (int i = 0; i < h; ++i) {
          for (int j = 0; j < w; j += 16) {
            __m256i s0 = _mm256_loadu_si256((const __m256i *)&ssrc0[j]);
            __m256i s1 = _mm256_loadu_si256((const __m256i *)&ssrc1[j]);
            __m256i diff = _mm256_srai_epi16(
                _mm256_abs_epi16(_mm256_sub_epi16(s0, s1)), DIFF_FACTOR_LOG2);
            __m256i m = _mm256_min_epi16(
                _mm256_max_epi16(y0, _mm256_add_epi16(diff, ymask_base)),
                yAOM_BLEND_A64_MAX_ALPHA);
            m = _mm256_packus_epi16(m, m);
            m = _mm256_permute4x64_epi64(m, _MM_SHUFFLE(0, 0, 2, 0));
            __m128i m0 = _mm256_castsi256_si128(m);
            _mm_storeu_si128((__m128i *)&mask[j], m0);
          }
          ssrc0 += src0_stride;
          ssrc1 += src1_stride;
          mask += w;
        }
      }
    } else {
      const __m128i xshift = xx_set1_64_from_32i(bd - 8 + DIFF_FACTOR_LOG2);
      if (mask_type == DIFFWTD_38_INV) {
        for (int i = 0; i < h; ++i) {
          for (int j = 0; j < w; j += 16) {
            __m256i s0 = _mm256_loadu_si256((const __m256i *)&ssrc0[j]);
            __m256i s1 = _mm256_loadu_si256((const __m256i *)&ssrc1[j]);
            __m256i diff = _mm256_sra_epi16(
                _mm256_abs_epi16(_mm256_sub_epi16(s0, s1)), xshift);
            __m256i m = _mm256_min_epi16(
                _mm256_max_epi16(y0, _mm256_add_epi16(diff, ymask_base)),
                yAOM_BLEND_A64_MAX_ALPHA);
            m = _mm256_sub_epi16(yAOM_BLEND_A64_MAX_ALPHA, m);
            m = _mm256_packus_epi16(m, m);
            m = _mm256_permute4x64_epi64(m, _MM_SHUFFLE(0, 0, 2, 0));
            __m128i m0 = _mm256_castsi256_si128(m);
            _mm_storeu_si128((__m128i *)&mask[j], m0);
          }
          ssrc0 += src0_stride;
          ssrc1 += src1_stride;
          mask += w;
        }
      } else {
        for (int i = 0; i < h; ++i) {
          for (int j = 0; j < w; j += 16) {
            __m256i s0 = _mm256_loadu_si256((const __m256i *)&ssrc0[j]);
            __m256i s1 = _mm256_loadu_si256((const __m256i *)&ssrc1[j]);
            __m256i diff = _mm256_sra_epi16(
                _mm256_abs_epi16(_mm256_sub_epi16(s0, s1)), xshift);
            __m256i m = _mm256_min_epi16(
                _mm256_max_epi16(y0, _mm256_add_epi16(diff, ymask_base)),
                yAOM_BLEND_A64_MAX_ALPHA);
            m = _mm256_packus_epi16(m, m);
            m = _mm256_permute4x64_epi64(m, _MM_SHUFFLE(0, 0, 2, 0));
            __m128i m0 = _mm256_castsi256_si128(m);
            _mm_storeu_si128((__m128i *)&mask[j], m0);
          }
          ssrc0 += src0_stride;
          ssrc1 += src1_stride;
          mask += w;
        }
      }
    }
  }
}
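// All four loop variants above implement the same per-pixel rule and differ
// only in the shift amount (bd == 8 uses the immediate DIFF_FACTOR_LOG2 form)
// and in whether the result is inverted. A hedged scalar sketch (hypothetical
// helper; in libaom DIFF_FACTOR_LOG2 is 4 and AOM_BLEND_A64_MAX_ALPHA is 64):
#include <stdint.h>
#include <stdlib.h>

static inline uint8_t diffwtd_mask_pixel(uint16_t s0, uint16_t s1, int bd,
                                         int inverse) {
  int diff = abs((int)s0 - (int)s1) >> (bd - 8 + 4 /* DIFF_FACTOR_LOG2 */);
  int m = 38 + diff;   // mask_base == 38
  if (m < 0) m = 0;    // cannot trigger here; mirrors _mm256_max_epi16(y0, ...)
  if (m > 64) m = 64;  // clamp to AOM_BLEND_A64_MAX_ALPHA
  return (uint8_t)(inverse ? 64 - m : m);
}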
__m256i test_mm256_abs_epi16(__m256i a) {
  // CHECK: @llvm.x86.avx2.pabs.w
  return _mm256_abs_epi16(a);
}
__m256i test_mm256_abs_epi16(__m256i a) {
  // CHECK-LABEL: test_mm256_abs_epi16
  // CHECK: call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %{{.*}})
  return _mm256_abs_epi16(a);
}
/**
 * \brief quantize transformed coefficients
 *
 */
void kvz_quant_flat_avx2(const encoder_state_t * const state, coeff_t *coef, coeff_t *q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
  const uint32_t * const scan = kvz_g_sig_last_scan[scan_idx][log2_block_size - 1];

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->global->QP, (encoder->bitdepth - 8) * 6);
  const uint32_t log2_tr_size = kvz_g_convert_to_bit[width] + 2;
  const int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
  const int32_t *quant_coeff = encoder->scaling_list.quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
  const int32_t transform_shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size; //!< Represents scaling through forward transform
  const int32_t q_bits = QUANT_SHIFT + qp_scaled / 6 + transform_shift;
  const int32_t add = ((state->global->slicetype == KVZ_SLICE_I) ? 171 : 85) << (q_bits - 9);
  const int32_t q_bits8 = q_bits - 8;

  assert(quant_coeff[0] <= (1 << 15) - 1 && quant_coeff[0] >= -(1 << 15)); // Assuming flat values fit in int16_t

  uint32_t ac_sum = 0;

  __m256i v_ac_sum = _mm256_setzero_si256();
  __m256i v_quant_coeff = _mm256_set1_epi16(quant_coeff[0]);

  // Quantization loop: level = (|coef| * quant_coeff + add) >> q_bits,
  // with the original sign restored afterwards via _mm256_sign_epi16.
  for (int32_t n = 0; n < width * height; n += 16) {
    __m256i v_level = _mm256_loadu_si256((__m256i*)&(coef[n]));
    __m256i v_sign = _mm256_cmpgt_epi16(_mm256_setzero_si256(), v_level);
    v_sign = _mm256_or_si256(v_sign, _mm256_set1_epi16(1));

    v_level = _mm256_abs_epi16(v_level);
    __m256i low_a = _mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0));
    __m256i high_a = _mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0));

    __m256i low_b = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i high_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));

    __m256i v_level32_a = _mm256_madd_epi16(low_a, low_b);
    __m256i v_level32_b = _mm256_madd_epi16(high_a, high_b);

    v_level32_a = _mm256_add_epi32(v_level32_a, _mm256_set1_epi32(add));
    v_level32_b = _mm256_add_epi32(v_level32_b, _mm256_set1_epi32(add));

    v_level32_a = _mm256_srai_epi32(v_level32_a, q_bits);
    v_level32_b = _mm256_srai_epi32(v_level32_b, q_bits);

    v_level = _mm256_packs_epi32(v_level32_a, v_level32_b);
    v_level = _mm256_sign_epi16(v_level, v_sign);

    _mm256_storeu_si256((__m256i*)&(q_coef[n]), v_level);

    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_a);
    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_b);
  }

  // Horizontal reduction of the eight 32-bit partial sums.
  __m128i temp = _mm_add_epi32(_mm256_castsi256_si128(v_ac_sum), _mm256_extracti128_si256(v_ac_sum, 1));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, KVZ_PERMUTE(2, 3, 0, 1)));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, KVZ_PERMUTE(1, 0, 1, 0)));
  ac_sum += _mm_cvtsi128_si32(temp);

  if (!(encoder->sign_hiding && ac_sum >= 2)) return;

  int32_t delta_u[LCU_WIDTH*LCU_WIDTH >> 2];

  // Second pass: compute the rounding deltas (delta_u) used by sign hiding.
  for (int32_t n = 0; n < width * height; n += 16) {
    __m256i v_level = _mm256_loadu_si256((__m256i*)&(coef[n]));

    v_level = _mm256_abs_epi16(v_level);
    __m256i low_a = _mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0));
    __m256i high_a = _mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0));

    __m256i low_b = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i high_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));

    __m256i v_level32_a = _mm256_madd_epi16(low_a, low_b);
    __m256i v_level32_b = _mm256_madd_epi16(high_a, high_b);

    v_level32_a = _mm256_add_epi32(v_level32_a, _mm256_set1_epi32(add));
    v_level32_b = _mm256_add_epi32(v_level32_b, _mm256_set1_epi32(add));

    v_level32_a = _mm256_srai_epi32(v_level32_a, q_bits);
    v_level32_b = _mm256_srai_epi32(v_level32_b, q_bits);

    v_level = _mm256_packs_epi32(v_level32_a, v_level32_b);

    __m256i v_coef = _mm256_loadu_si256((__m256i*)&(coef[n]));
    __m256i v_coef_a = _mm256_unpacklo_epi16(_mm256_abs_epi16(v_coef), _mm256_set1_epi16(0));
    __m256i v_coef_b = _mm256_unpackhi_epi16(_mm256_abs_epi16(v_coef), _mm256_set1_epi16(0));
    __m256i v_quant_coeff_a = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i v_quant_coeff_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    v_coef_a = _mm256_madd_epi16(v_coef_a, v_quant_coeff_a);
    v_coef_b = _mm256_madd_epi16(v_coef_b, v_quant_coeff_b);
    v_coef_a = _mm256_sub_epi32(v_coef_a, _mm256_slli_epi32(_mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0)), q_bits));
    v_coef_b = _mm256_sub_epi32(v_coef_b, _mm256_slli_epi32(_mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0)), q_bits));
    v_coef_a = _mm256_srai_epi32(v_coef_a, q_bits8);
    v_coef_b = _mm256_srai_epi32(v_coef_b, q_bits8);

    _mm_storeu_si128((__m128i*)&(delta_u[n + 0 * 4]), _mm256_castsi256_si128(v_coef_a));
    _mm_storeu_si128((__m128i*)&(delta_u[n + 2 * 4]), _mm256_extracti128_si256(v_coef_a, 1));
    _mm_storeu_si128((__m128i*)&(delta_u[n + 1 * 4]), _mm256_castsi256_si128(v_coef_b));
    _mm_storeu_si128((__m128i*)&(delta_u[n + 3 * 4]), _mm256_extracti128_si256(v_coef_b, 1));
  }

  if (ac_sum >= 2) {
#define SCAN_SET_SIZE 16
#define LOG2_SCAN_SET_SIZE 4
    int32_t n, last_cg = -1, abssum = 0, subset, subpos;
    for (subset = (width * height - 1) >> LOG2_SCAN_SET_SIZE; subset >= 0; subset--) {
      int32_t first_nz_pos_in_cg = SCAN_SET_SIZE, last_nz_pos_in_cg = -1;
      subpos = subset << LOG2_SCAN_SET_SIZE;
      abssum = 0;

      // Find last coeff pos
      for (n = SCAN_SET_SIZE - 1; n >= 0; n--) {
        if (q_coef[scan[n + subpos]]) {
          last_nz_pos_in_cg = n;
          break;
        }
      }

      // First coeff pos
      for (n = 0; n < SCAN_SET_SIZE; n++) {
        if (q_coef[scan[n + subpos]]) {
          first_nz_pos_in_cg = n;
          break;
        }
      }

      // Sum all quantized coeffs between first and last
      for (n = first_nz_pos_in_cg; n <= last_nz_pos_in_cg; n++) {
        abssum += q_coef[scan[n + subpos]];
      }

      if (last_nz_pos_in_cg >= 0 && last_cg == -1) {
        last_cg = 1;
      }

      if (last_nz_pos_in_cg - first_nz_pos_in_cg >= 4) {
        int32_t signbit = (q_coef[scan[subpos + first_nz_pos_in_cg]] > 0 ? 0 : 1);
        if (signbit != (abssum & 0x1)) { // compare signbit with sum_parity
          int32_t min_cost_inc = 0x7fffffff, min_pos = -1, cur_cost = 0x7fffffff;
          int16_t final_change = 0, cur_change = 0;

          for (n = (last_cg == 1 ? last_nz_pos_in_cg : SCAN_SET_SIZE - 1); n >= 0; n--) {
            uint32_t blkPos = scan[n + subpos];
            if (q_coef[blkPos] != 0) {
              if (delta_u[blkPos] > 0) {
                cur_cost = -delta_u[blkPos];
                cur_change = 1;
              } else if (n == first_nz_pos_in_cg && abs(q_coef[blkPos]) == 1) {
                cur_cost = 0x7fffffff;
              } else {
                cur_cost = delta_u[blkPos];
                cur_change = -1;
              }
            } else if (n < first_nz_pos_in_cg && ((coef[blkPos] >= 0) ? 0 : 1) != signbit) {
              cur_cost = 0x7fffffff;
            } else {
              cur_cost = -delta_u[blkPos];
              cur_change = 1;
            }

            if (cur_cost < min_cost_inc) {
              min_cost_inc = cur_cost;
              final_change = cur_change;
              min_pos = blkPos;
            }
          } // CG loop

          if (q_coef[min_pos] == 32767 || q_coef[min_pos] == -32768) {
            final_change = -1;
          }

          if (coef[min_pos] >= 0) q_coef[min_pos] += final_change;
          else q_coef[min_pos] -= final_change;
        } // Hide
      }
      if (last_cg == 1) last_cg = 0;
    }
#undef SCAN_SET_SIZE
#undef LOG2_SCAN_SET_SIZE
  }
}
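// The sign-hiding pass above encodes the sign of a coding group's first
// nonzero coefficient in the parity of the group's absolute sum: when the
// parity disagrees with the sign, the coefficient with the cheapest rounding
// delta is nudged by +/-1. A hedged sketch of just the parity test
// (hypothetical helper, not part of the source):
static inline int sign_hiding_adjust_needed(int32_t abssum, coeff_t first_nz_coef)
{
  int signbit = (first_nz_coef > 0) ? 0 : 1;
  return signbit != (abssum & 0x1);  // mismatch -> nudge one coefficient
}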
INLINE static void haddwd_accumulate_dual_avx2(__m256i *accumulate, __m256i *ver_row)
{
  __m256i abs_value = _mm256_abs_epi16(*ver_row);
  *accumulate = _mm256_add_epi32(*accumulate, _mm256_madd_epi16(abs_value, _mm256_set1_epi16(1)));
}
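// Scalar view of the accumulator above (hypothetical sketch): each call folds
// the sixteen absolute 16-bit lanes of *ver_row into the running sum, except
// that the vector version keeps eight 32-bit partial sums (madd against ones
// widens adjacent lane pairs) which are reduced to a single value later.
#include <stdint.h>
#include <stdlib.h>

static inline void haddwd_accumulate_scalar(int32_t *accumulate,
                                            const int16_t ver_row[16]) {
  for (int i = 0; i < 16; ++i) *accumulate += abs(ver_row[i]);
}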