static INLINE unsigned int highbd_masked_sad16xh_avx2(
    const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
    const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
    int width, int height) {
  const uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8);
  const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8);
  int x, y;
  __m256i res = _mm256_setzero_si256();
  const __m256i mask_max = _mm256_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
  const __m256i round_const =
      _mm256_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
  const __m256i one = _mm256_set1_epi16(1);

  for (y = 0; y < height; y++) {
    for (x = 0; x < width; x += 16) {
      const __m256i src = _mm256_lddqu_si256((const __m256i *)&src_ptr[x]);
      const __m256i a = _mm256_lddqu_si256((const __m256i *)&a_ptr[x]);
      const __m256i b = _mm256_lddqu_si256((const __m256i *)&b_ptr[x]);
      // Zero-extend mask to 16 bits
      const __m256i m =
          _mm256_cvtepu8_epi16(_mm_lddqu_si128((const __m128i *)&m_ptr[x]));
      const __m256i m_inv = _mm256_sub_epi16(mask_max, m);

      const __m256i data_l = _mm256_unpacklo_epi16(a, b);
      const __m256i mask_l = _mm256_unpacklo_epi16(m, m_inv);
      __m256i pred_l = _mm256_madd_epi16(data_l, mask_l);
      pred_l = _mm256_srai_epi32(_mm256_add_epi32(pred_l, round_const),
                                 AOM_BLEND_A64_ROUND_BITS);

      const __m256i data_r = _mm256_unpackhi_epi16(a, b);
      const __m256i mask_r = _mm256_unpackhi_epi16(m, m_inv);
      __m256i pred_r = _mm256_madd_epi16(data_r, mask_r);
      pred_r = _mm256_srai_epi32(_mm256_add_epi32(pred_r, round_const),
                                 AOM_BLEND_A64_ROUND_BITS);

      // Note: the maximum value in pred_l/r is (2^bd)-1 < 2^15,
      // so it is safe to do signed saturation here.
      const __m256i pred = _mm256_packs_epi32(pred_l, pred_r);
      // There is no 16-bit SAD instruction, so we have to synthesize
      // an 8-element SAD. We do this by storing 4 32-bit partial SADs,
      // and accumulating them at the end
      const __m256i diff = _mm256_abs_epi16(_mm256_sub_epi16(pred, src));
      res = _mm256_add_epi32(res, _mm256_madd_epi16(diff, one));
    }

    src_ptr += src_stride;
    a_ptr += a_stride;
    b_ptr += b_stride;
    m_ptr += m_stride;
  }
  // At this point, we have four 32-bit partial SADs stored in 'res'.
  res = _mm256_hadd_epi32(res, res);
  res = _mm256_hadd_epi32(res, res);
  // The blend was already rounded and shifted per pixel above, so the
  // accumulated SAD needs no further normalization.
  int sad = _mm256_extract_epi32(res, 0) + _mm256_extract_epi32(res, 4);
  return sad;
}
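/* For reference, a minimal scalar sketch of what the kernel above computes,
   assuming AOM_BLEND_A64_ROUND_BITS == 6 (mask values in [0, 64]). The helper
   name is ours, not libaom's. */
static unsigned int highbd_masked_sad_ref(const uint16_t *src, int src_stride,
                                          const uint16_t *a, int a_stride,
                                          const uint16_t *b, int b_stride,
                                          const uint8_t *m, int m_stride,
                                          int width, int height) {
  unsigned int sad = 0;
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      // Blend the two predictors with the 6-bit mask, rounding to nearest.
      const int pred = (a[x] * m[x] + b[x] * (64 - m[x]) + 32) >> 6;
      const int diff = src[x] - pred;
      sad += (diff < 0) ? -diff : diff;
    }
    src += src_stride;
    a += a_stride;
    b += b_stride;
    m += m_stride;
  }
  return sad;
}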
void fft128_2way( void *a )
{
  int i;
  // Temp space to help for interleaving in the end
  __m256i B[8];
  __m256i *A = (__m256i*) a;
//  __m256i *Twiddle = (__m256i*)FFT128_Twiddle;

  /* Size-2 butterflies */
  for ( i = 0; i < 8; i++ )
  {
    B[ i ]   = _mm256_add_epi16( A[ i ], A[ i+8 ] );
    B[ i ]   = REDUCE_FULL_S( B[ i ] );
    A[ i+8 ] = _mm256_sub_epi16( A[ i ], A[ i+8 ] );
    A[ i+8 ] = REDUCE_FULL_S( A[ i+8 ] );
    A[ i+8 ] = _mm256_mullo_epi16( A[ i+8 ], FFT128_Twiddle[i].m256i );
    A[ i+8 ] = REDUCE_FULL_S( A[ i+8 ] );
  }

  fft64_2way( B );
  fft64_2way( A+8 );

  /* Transpose (i.e. interleave) */
  for ( i = 0; i < 8; i++ )
  {
    A[ 2*i   ] = _mm256_unpacklo_epi16( B[ i ], A[ i+8 ] );
    A[ 2*i+1 ] = _mm256_unpackhi_epi16( B[ i ], A[ i+8 ] );
  }
}
/* Routine optimized for shuffling a buffer for a type size of 16 bytes. */
static void
shuffle16_avx2(uint8_t* const dest, const uint8_t* const src,
  const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 16;
  size_t j;
  int k, l;
  __m256i ymm0[16], ymm1[16];

  /* Create the shuffle mask.
     NOTE: The XMM/YMM 'set' intrinsics require the arguments to be ordered from
     most to least significant (i.e., their order is reversed when compared to
     loading the mask from an array). */
  const __m256i shmask = _mm256_set_epi8(
    0x0f, 0x07, 0x0e, 0x06, 0x0d, 0x05, 0x0c, 0x04,
    0x0b, 0x03, 0x0a, 0x02, 0x09, 0x01, 0x08, 0x00,
    0x0f, 0x07, 0x0e, 0x06, 0x0d, 0x05, 0x0c, 0x04,
    0x0b, 0x03, 0x0a, 0x02, 0x09, 0x01, 0x08, 0x00);

  for (j = 0; j < vectorizable_elements; j += sizeof(__m256i)) {
    /* Fetch 32 elements (512 bytes) into 16 YMM registers. */
    for (k = 0; k < 16; k++) {
      ymm0[k] = _mm256_loadu_si256((__m256i*)(src + (j * bytesoftype) + (k * sizeof(__m256i))));
    }
    /* Transpose bytes */
    for (k = 0, l = 0; k < 8; k++, l +=2) {
      ymm1[k*2] = _mm256_unpacklo_epi8(ymm0[l], ymm0[l+1]);
      ymm1[k*2+1] = _mm256_unpackhi_epi8(ymm0[l], ymm0[l+1]);
    }
    /* Transpose words */
    for (k = 0, l = -2; k < 8; k++, l++) {
      if ((k%2) == 0) l += 2;
      ymm0[k*2] = _mm256_unpacklo_epi16(ymm1[l], ymm1[l+2]);
      ymm0[k*2+1] = _mm256_unpackhi_epi16(ymm1[l], ymm1[l+2]);
    }
    /* Transpose double words */
    for (k = 0, l = -4; k < 8; k++, l++) {
      if ((k%4) == 0) l += 4;
      ymm1[k*2] = _mm256_unpacklo_epi32(ymm0[l], ymm0[l+4]);
      ymm1[k*2+1] = _mm256_unpackhi_epi32(ymm0[l], ymm0[l+4]);
    }
    /* Transpose quad words */
    for (k = 0; k < 8; k++) {
      ymm0[k*2] = _mm256_unpacklo_epi64(ymm1[k], ymm1[k+8]);
      ymm0[k*2+1] = _mm256_unpackhi_epi64(ymm1[k], ymm1[k+8]);
    }
    for (k = 0; k < 16; k++) {
      ymm0[k] = _mm256_permute4x64_epi64(ymm0[k], 0xd8);
      ymm0[k] = _mm256_shuffle_epi8(ymm0[k], shmask);
    }
    /* Store the result vectors */
    uint8_t* const dest_for_jth_element = dest + j;
    for (k = 0; k < 16; k++) {
      _mm256_storeu_si256((__m256i*)(dest_for_jth_element + (k * total_elements)), ymm0[k]);
    }
  }
}
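/* What the AVX2 shuffle kernels in this family compute, as a plain scalar
   sketch (a reference helper of ours, not part of c-blosc): byte i of
   element j moves to dest[i * nelems + j]. */
static void shuffle_scalar_ref(uint8_t* dest, const uint8_t* src,
                               size_t nelems, size_t bytesoftype) {
  for (size_t j = 0; j < nelems; j++) {
    for (size_t i = 0; i < bytesoftype; i++) {
      dest[i * nelems + j] = src[j * bytesoftype + i];
    }
  }
}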
/* Routine optimized for shuffling a buffer for a type size of 8 bytes. */
static void
shuffle8_avx2(uint8_t* const dest, const uint8_t* const src,
  const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 8;
  size_t j;
  int k, l;
  __m256i ymm0[8], ymm1[8];

  for (j = 0; j < vectorizable_elements; j += sizeof(__m256i)) {
    /* Fetch 32 elements (256 bytes) then transpose bytes. */
    for (k = 0; k < 8; k++) {
      ymm0[k] = _mm256_loadu_si256((__m256i*)(src + (j * bytesoftype) + (k * sizeof(__m256i))));
      ymm1[k] = _mm256_shuffle_epi32(ymm0[k], 0x4e);
      ymm1[k] = _mm256_unpacklo_epi8(ymm0[k], ymm1[k]);
    }
    /* Transpose words */
    for (k = 0, l = 0; k < 4; k++, l +=2) {
      ymm0[k*2] = _mm256_unpacklo_epi16(ymm1[l], ymm1[l+1]);
      ymm0[k*2+1] = _mm256_unpackhi_epi16(ymm1[l], ymm1[l+1]);
    }
    /* Transpose double words */
    for (k = 0, l = 0; k < 4; k++, l++) {
      if (k == 2) l += 2;
      ymm1[k*2] = _mm256_unpacklo_epi32(ymm0[l], ymm0[l+2]);
      ymm1[k*2+1] = _mm256_unpackhi_epi32(ymm0[l], ymm0[l+2]);
    }
    /* Transpose quad words */
    for (k = 0; k < 4; k++) {
      ymm0[k*2] = _mm256_unpacklo_epi64(ymm1[k], ymm1[k+4]);
      ymm0[k*2+1] = _mm256_unpackhi_epi64(ymm1[k], ymm1[k+4]);
    }
    for(k = 0; k < 8; k++) {
      ymm1[k] = _mm256_permute4x64_epi64(ymm0[k], 0x72);
      ymm0[k] = _mm256_permute4x64_epi64(ymm0[k], 0xD8);
      ymm0[k] = _mm256_unpacklo_epi16(ymm0[k], ymm1[k]);
    }
    /* Store the result vectors */
    uint8_t* const dest_for_jth_element = dest + j;
    for (k = 0; k < 8; k++) {
      _mm256_storeu_si256((__m256i*)(dest_for_jth_element + (k * total_elements)), ymm0[k]);
    }
  }
}
/* Routine optimized for unshuffling a buffer for a type size of 8 bytes. */
static void
unshuffle8_avx2(uint8_t* const dest, const uint8_t* const src,
  const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 8;
  size_t i;
  int j;
  __m256i ymm0[8], ymm1[8];

  for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
    /* Fetch 32 elements (256 bytes) into 8 YMM registers. */
    const uint8_t* const src_for_ith_element = src + i;
    for (j = 0; j < 8; j++) {
      ymm0[j] = _mm256_loadu_si256((__m256i*)(src_for_ith_element + (j * total_elements)));
    }
    /* Shuffle bytes */
    for (j = 0; j < 4; j++) {
      /* Compute the low 32 bytes */
      ymm1[j] = _mm256_unpacklo_epi8(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm1[4+j] = _mm256_unpackhi_epi8(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Shuffle words */
    for (j = 0; j < 4; j++) {
      /* Compute the low 32 bytes */
      ymm0[j] = _mm256_unpacklo_epi16(ymm1[j*2], ymm1[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm0[4+j] = _mm256_unpackhi_epi16(ymm1[j*2], ymm1[j*2+1]);
    }
    for (j = 0; j < 8; j++) {
      ymm0[j] = _mm256_permute4x64_epi64(ymm0[j], 0xd8);
    }
    /* Shuffle 4-byte dwords */
    for (j = 0; j < 4; j++) {
      /* Compute the low 32 bytes */
      ymm1[j] = _mm256_unpacklo_epi32(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm1[4+j] = _mm256_unpackhi_epi32(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Store the result vectors in proper order */
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (0 * sizeof(__m256i))), ymm1[0]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (1 * sizeof(__m256i))), ymm1[2]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (2 * sizeof(__m256i))), ymm1[1]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (3 * sizeof(__m256i))), ymm1[3]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (4 * sizeof(__m256i))), ymm1[4]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (5 * sizeof(__m256i))), ymm1[6]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (6 * sizeof(__m256i))), ymm1[5]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (7 * sizeof(__m256i))), ymm1[7]);
  }
}
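/* The scalar inverse of the shuffle above (again a sketch of ours, not
   c-blosc code): byte i of element j is read back from src[i * nelems + j]. */
static void unshuffle_scalar_ref(uint8_t* dest, const uint8_t* src,
                                 size_t nelems, size_t bytesoftype) {
  for (size_t j = 0; j < nelems; j++) {
    for (size_t i = 0; i < bytesoftype; i++) {
      dest[j * bytesoftype + i] = src[i * nelems + j];
    }
  }
}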
template <bool align> SIMD_INLINE void Bgr48pToBgra32(uint8_t * bgra, const uint8_t * blue, const uint8_t * green,
    const uint8_t * red, size_t offset, __m256i alpha)
{
    __m256i _blue = _mm256_and_si256(LoadPermuted<align>((__m256i*)(blue + offset)), K16_00FF);
    __m256i _green = _mm256_and_si256(LoadPermuted<align>((__m256i*)(green + offset)), K16_00FF);
    __m256i _red = _mm256_and_si256(LoadPermuted<align>((__m256i*)(red + offset)), K16_00FF);

    __m256i bg = _mm256_or_si256(_blue, _mm256_slli_si256(_green, 1));
    __m256i ra = _mm256_or_si256(_red, alpha);

    Store<align>((__m256i*)bgra + 0, _mm256_unpacklo_epi16(bg, ra));
    Store<align>((__m256i*)bgra + 1, _mm256_unpackhi_epi16(bg, ra));
}
static INLINE __m256i highbd_comp_mask_pred_line_avx2(const __m256i s0,
                                                      const __m256i s1,
                                                      const __m256i a) {
  const __m256i alpha_max = _mm256_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
  const __m256i round_const =
      _mm256_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
  const __m256i a_inv = _mm256_sub_epi16(alpha_max, a);

  const __m256i s_lo = _mm256_unpacklo_epi16(s0, s1);
  const __m256i a_lo = _mm256_unpacklo_epi16(a, a_inv);
  const __m256i pred_lo = _mm256_madd_epi16(s_lo, a_lo);
  const __m256i pred_l = _mm256_srai_epi32(
      _mm256_add_epi32(pred_lo, round_const), AOM_BLEND_A64_ROUND_BITS);

  const __m256i s_hi = _mm256_unpackhi_epi16(s0, s1);
  const __m256i a_hi = _mm256_unpackhi_epi16(a, a_inv);
  const __m256i pred_hi = _mm256_madd_epi16(s_hi, a_hi);
  const __m256i pred_h = _mm256_srai_epi32(
      _mm256_add_epi32(pred_hi, round_const), AOM_BLEND_A64_ROUND_BITS);

  const __m256i comp = _mm256_packs_epi32(pred_l, pred_h);

  return comp;
}
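/* A hedged usage sketch (ours, modeled on how such a row helper is typically
   called, not copied from libaom): blend one 16-wide row of two 16-bit
   predictors with an 8-bit mask zero-extended to 16 bits. Element order is
   preserved because packs_epi32 recombines the unpacked halves per lane. */
static void comp_mask_pred_row16(uint16_t *comp_pred, const uint16_t *pred0,
                                 const uint16_t *pred1, const uint8_t *mask) {
  const __m256i s0 = _mm256_loadu_si256((const __m256i *)pred0);
  const __m256i s1 = _mm256_loadu_si256((const __m256i *)pred1);
  const __m256i a =
      _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)mask));
  _mm256_storeu_si256((__m256i *)comp_pred,
                      highbd_comp_mask_pred_line_avx2(s0, s1, a));
}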
/* Routine optimized for shuffling a buffer for a type size of 4 bytes. */
static void
shuffle4_avx2(uint8_t* const dest, const uint8_t* const src,
  const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 4;
  size_t i;
  int j;
  __m256i ymm0[4], ymm1[4];

  /* Create the shuffle mask.
     NOTE: The XMM/YMM 'set' intrinsics require the arguments to be ordered from
     most to least significant (i.e., their order is reversed when compared to
     loading the mask from an array). */
  const __m256i mask = _mm256_set_epi32(
    0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);

  for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
    /* Fetch 32 elements (128 bytes) then transpose bytes and words. */
    for (j = 0; j < 4; j++) {
      ymm0[j] = _mm256_loadu_si256((__m256i*)(src + (i * bytesoftype) + (j * sizeof(__m256i))));
      ymm1[j] = _mm256_shuffle_epi32(ymm0[j], 0xd8);
      ymm0[j] = _mm256_shuffle_epi32(ymm0[j], 0x8d);
      ymm0[j] = _mm256_unpacklo_epi8(ymm1[j], ymm0[j]);
      ymm1[j] = _mm256_shuffle_epi32(ymm0[j], 0x04e);
      ymm0[j] = _mm256_unpacklo_epi16(ymm0[j], ymm1[j]);
    }
    /* Transpose double words */
    for (j = 0; j < 2; j++) {
      ymm1[j*2] = _mm256_unpacklo_epi32(ymm0[j*2], ymm0[j*2+1]);
      ymm1[j*2+1] = _mm256_unpackhi_epi32(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Transpose quad words */
    for (j = 0; j < 2; j++) {
      ymm0[j*2] = _mm256_unpacklo_epi64(ymm1[j], ymm1[j+2]);
      ymm0[j*2+1] = _mm256_unpackhi_epi64(ymm1[j], ymm1[j+2]);
    }
    for (j = 0; j < 4; j++) {
      ymm0[j] = _mm256_permutevar8x32_epi32(ymm0[j], mask);
    }
    /* Store the result vectors */
    uint8_t* const dest_for_ith_element = dest + i;
    for (j = 0; j < 4; j++) {
      _mm256_storeu_si256((__m256i*)(dest_for_ith_element + (j * total_elements)), ymm0[j]);
    }
  }
}
static INLINE void hor_transform_row_dual_avx2(__m256i* row)
{
  __m256i mask_pos = _mm256_set1_epi16(1);
  __m256i mask_neg = _mm256_set1_epi16(-1);
  __m256i sign_mask = _mm256_unpacklo_epi64(mask_pos, mask_neg);
  __m256i temp = _mm256_shuffle_epi32(*row, KVZ_PERMUTE(2, 3, 0, 1));
  *row = _mm256_sign_epi16(*row, sign_mask);
  *row = _mm256_add_epi16(*row, temp);

  sign_mask = _mm256_unpacklo_epi32(mask_pos, mask_neg);
  temp = _mm256_shuffle_epi32(*row, KVZ_PERMUTE(1, 0, 3, 2));
  *row = _mm256_sign_epi16(*row, sign_mask);
  *row = _mm256_add_epi16(*row, temp);

  sign_mask = _mm256_unpacklo_epi16(mask_pos, mask_neg);
  temp = _mm256_shufflelo_epi16(*row, KVZ_PERMUTE(1, 0, 3, 2));
  temp = _mm256_shufflehi_epi16(temp, KVZ_PERMUTE(1, 0, 3, 2));
  *row = _mm256_sign_epi16(*row, sign_mask);
  *row = _mm256_add_epi16(*row, temp);
}
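/* Scalar model of one 8-sample row of the transform above (a sketch of ours,
   not kvazaar code): three butterfly stages at distances 4, 2 and 1, which is
   what the sign-mask/shuffle pairs implement; the AVX2 version runs two such
   rows per register, one per 128-bit lane. */
static void hor_transform_row_model(int16_t d[8])
{
  int16_t t[8];
  for (int dist = 4; dist >= 1; dist >>= 1) {
    for (int i = 0; i < 8; i++) {
      if ((i & dist) == 0)
        t[i] = d[i] + d[i + dist];  // low half of each group: sums
      else
        t[i] = d[i - dist] - d[i];  // high half: differences
    }
    for (int i = 0; i < 8; i++) d[i] = t[i];
  }
}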
/* Routine optimized for unshuffling a buffer for a type size of 4 bytes. */
static void
unshuffle4_avx2(uint8_t* const dest, const uint8_t* const src,
  const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 4;
  size_t i;
  int j;
  __m256i ymm0[4], ymm1[4];

  for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
    /* Load 32 elements (128 bytes) into 4 YMM registers. */
    const uint8_t* const src_for_ith_element = src + i;
    for (j = 0; j < 4; j++) {
      ymm0[j] = _mm256_loadu_si256((__m256i*)(src_for_ith_element + (j * total_elements)));
    }
    /* Shuffle bytes */
    for (j = 0; j < 2; j++) {
      /* Compute the low 64 bytes */
      ymm1[j] = _mm256_unpacklo_epi8(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 64 bytes */
      ymm1[2+j] = _mm256_unpackhi_epi8(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Shuffle 2-byte words */
    for (j = 0; j < 2; j++) {
      /* Compute the low 64 bytes */
      ymm0[j] = _mm256_unpacklo_epi16(ymm1[j*2], ymm1[j*2+1]);
      /* Compute the hi 64 bytes */
      ymm0[2+j] = _mm256_unpackhi_epi16(ymm1[j*2], ymm1[j*2+1]);
    }
    ymm1[0] = _mm256_permute2x128_si256(ymm0[0], ymm0[2], 0x20);
    ymm1[1] = _mm256_permute2x128_si256(ymm0[1], ymm0[3], 0x20);
    ymm1[2] = _mm256_permute2x128_si256(ymm0[0], ymm0[2], 0x31);
    ymm1[3] = _mm256_permute2x128_si256(ymm0[1], ymm0[3], 0x31);

    /* Store the result vectors in proper order */
    for (j = 0; j < 4; j++) {
      _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (j * sizeof(__m256i))), ymm1[j]);
    }
  }
}
void cvtScale_s16s32f32Line_AVX2(const short* src, int* dst, float scale, float shift, int width)
{
    int x = 0;

    __m256 scale256 = _mm256_set1_ps(scale);
    __m256 shift256 = _mm256_set1_ps(shift);
    const int shuffle = 0xD8;

    for (; x <= width - 16; x += 16)
    {
        __m256i v_src = _mm256_loadu_si256((const __m256i *)(src + x));
        v_src = _mm256_permute4x64_epi64(v_src, shuffle);
        __m256i v_src_lo = _mm256_srai_epi32(_mm256_unpacklo_epi16(v_src, v_src), 16);
        __m256i v_src_hi = _mm256_srai_epi32(_mm256_unpackhi_epi16(v_src, v_src), 16);
        __m256 v_dst0 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_lo), scale256), shift256);
        __m256 v_dst1 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_hi), scale256), shift256);
        _mm256_storeu_si256((__m256i *)(dst + x), _mm256_cvtps_epi32(v_dst0));
        _mm256_storeu_si256((__m256i *)(dst + x + 8), _mm256_cvtps_epi32(v_dst1));
    }

    for (; x < width; x++)
        dst[x] = saturate_cast<int>(src[x] * scale + shift);
}
int warpAffineBlockline(int *adelta, int *bdelta, short* xy, short* alpha, int X0, int Y0, int bw)
{
    const int AB_BITS = MAX(10, (int)INTER_BITS);
    int x1 = 0;

    __m256i fxy_mask = _mm256_set1_epi32(INTER_TAB_SIZE - 1);
    __m256i XX = _mm256_set1_epi32(X0), YY = _mm256_set1_epi32(Y0);
    for (; x1 <= bw - 16; x1 += 16)
    {
        __m256i tx0, tx1, ty0, ty1;
        tx0 = _mm256_add_epi32(_mm256_loadu_si256((const __m256i*)(adelta + x1)), XX);
        ty0 = _mm256_add_epi32(_mm256_loadu_si256((const __m256i*)(bdelta + x1)), YY);
        tx1 = _mm256_add_epi32(_mm256_loadu_si256((const __m256i*)(adelta + x1 + 8)), XX);
        ty1 = _mm256_add_epi32(_mm256_loadu_si256((const __m256i*)(bdelta + x1 + 8)), YY);

        tx0 = _mm256_srai_epi32(tx0, AB_BITS - INTER_BITS);
        ty0 = _mm256_srai_epi32(ty0, AB_BITS - INTER_BITS);
        tx1 = _mm256_srai_epi32(tx1, AB_BITS - INTER_BITS);
        ty1 = _mm256_srai_epi32(ty1, AB_BITS - INTER_BITS);

        __m256i fx_ = _mm256_packs_epi32(_mm256_and_si256(tx0, fxy_mask),
            _mm256_and_si256(tx1, fxy_mask));
        __m256i fy_ = _mm256_packs_epi32(_mm256_and_si256(ty0, fxy_mask),
            _mm256_and_si256(ty1, fxy_mask));
        tx0 = _mm256_packs_epi32(_mm256_srai_epi32(tx0, INTER_BITS),
            _mm256_srai_epi32(tx1, INTER_BITS));
        ty0 = _mm256_packs_epi32(_mm256_srai_epi32(ty0, INTER_BITS),
            _mm256_srai_epi32(ty1, INTER_BITS));
        fx_ = _mm256_adds_epi16(fx_, _mm256_slli_epi16(fy_, INTER_BITS));
        fx_ = _mm256_permute4x64_epi64(fx_, (3 << 6) + (1 << 4) + (2 << 2) + 0);

        _mm256_storeu_si256((__m256i*)(xy + x1 * 2), _mm256_unpacklo_epi16(tx0, ty0));
        _mm256_storeu_si256((__m256i*)(xy + x1 * 2 + 16), _mm256_unpackhi_epi16(tx0, ty0));
        _mm256_storeu_si256((__m256i*)(alpha + x1), fx_);
    }
    _mm256_zeroupper();
    return x1;
}
__m256i test_mm256_unpacklo_epi16(__m256i a, __m256i b) {
  // CHECK: shufflevector <16 x i16> %{{.*}}, <16 x i16> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
  return _mm256_unpacklo_epi16(a, b);
}
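/* A scalar model of the lane behavior checked above (a sketch of ours): within
   each 128-bit lane, the low four 16-bit elements of a and b are interleaved,
   matching the shufflevector indices 0,16,1,17,... and 8,24,9,25,... */
static void unpacklo_epi16_model(const uint16_t a[16], const uint16_t b[16],
                                 uint16_t r[16]) {
  for (int lane = 0; lane < 2; lane++) {
    for (int i = 0; i < 4; i++) {
      r[lane * 8 + 2 * i + 0] = a[lane * 8 + i];
      r[lane * 8 + 2 * i + 1] = b[lane * 8 + i];
    }
  }
}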
static void hadamard_col8x2_avx2(__m256i *in, int iter) {
  __m256i a0 = in[0];
  __m256i a1 = in[1];
  __m256i a2 = in[2];
  __m256i a3 = in[3];
  __m256i a4 = in[4];
  __m256i a5 = in[5];
  __m256i a6 = in[6];
  __m256i a7 = in[7];

  __m256i b0 = _mm256_add_epi16(a0, a1);
  __m256i b1 = _mm256_sub_epi16(a0, a1);
  __m256i b2 = _mm256_add_epi16(a2, a3);
  __m256i b3 = _mm256_sub_epi16(a2, a3);
  __m256i b4 = _mm256_add_epi16(a4, a5);
  __m256i b5 = _mm256_sub_epi16(a4, a5);
  __m256i b6 = _mm256_add_epi16(a6, a7);
  __m256i b7 = _mm256_sub_epi16(a6, a7);

  a0 = _mm256_add_epi16(b0, b2);
  a1 = _mm256_add_epi16(b1, b3);
  a2 = _mm256_sub_epi16(b0, b2);
  a3 = _mm256_sub_epi16(b1, b3);
  a4 = _mm256_add_epi16(b4, b6);
  a5 = _mm256_add_epi16(b5, b7);
  a6 = _mm256_sub_epi16(b4, b6);
  a7 = _mm256_sub_epi16(b5, b7);

  if (iter == 0) {
    b0 = _mm256_add_epi16(a0, a4);
    b7 = _mm256_add_epi16(a1, a5);
    b3 = _mm256_add_epi16(a2, a6);
    b4 = _mm256_add_epi16(a3, a7);
    b2 = _mm256_sub_epi16(a0, a4);
    b6 = _mm256_sub_epi16(a1, a5);
    b1 = _mm256_sub_epi16(a2, a6);
    b5 = _mm256_sub_epi16(a3, a7);

    a0 = _mm256_unpacklo_epi16(b0, b1);
    a1 = _mm256_unpacklo_epi16(b2, b3);
    a2 = _mm256_unpackhi_epi16(b0, b1);
    a3 = _mm256_unpackhi_epi16(b2, b3);
    a4 = _mm256_unpacklo_epi16(b4, b5);
    a5 = _mm256_unpacklo_epi16(b6, b7);
    a6 = _mm256_unpackhi_epi16(b4, b5);
    a7 = _mm256_unpackhi_epi16(b6, b7);

    b0 = _mm256_unpacklo_epi32(a0, a1);
    b1 = _mm256_unpacklo_epi32(a4, a5);
    b2 = _mm256_unpackhi_epi32(a0, a1);
    b3 = _mm256_unpackhi_epi32(a4, a5);
    b4 = _mm256_unpacklo_epi32(a2, a3);
    b5 = _mm256_unpacklo_epi32(a6, a7);
    b6 = _mm256_unpackhi_epi32(a2, a3);
    b7 = _mm256_unpackhi_epi32(a6, a7);

    in[0] = _mm256_unpacklo_epi64(b0, b1);
    in[1] = _mm256_unpackhi_epi64(b0, b1);
    in[2] = _mm256_unpacklo_epi64(b2, b3);
    in[3] = _mm256_unpackhi_epi64(b2, b3);
    in[4] = _mm256_unpacklo_epi64(b4, b5);
    in[5] = _mm256_unpackhi_epi64(b4, b5);
    in[6] = _mm256_unpacklo_epi64(b6, b7);
    in[7] = _mm256_unpackhi_epi64(b6, b7);
  } else {
    in[0] = _mm256_add_epi16(a0, a4);
    in[7] = _mm256_add_epi16(a1, a5);
    in[3] = _mm256_add_epi16(a2, a6);
    in[4] = _mm256_add_epi16(a3, a7);
    in[2] = _mm256_sub_epi16(a0, a4);
    in[6] = _mm256_sub_epi16(a1, a5);
    in[1] = _mm256_sub_epi16(a2, a6);
    in[5] = _mm256_sub_epi16(a3, a7);
  }
}
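/* Scalar sketch of one column of the butterfly network above (the iter != 0
   path, which skips the transpose); the AVX2 version runs sixteen such
   columns at once, two 8x8 blocks side by side. Helper name is ours. */
static void hadamard_col8_model(int16_t a[8]) {
  int16_t b[8], t[8];
  for (int i = 0; i < 4; i++) {
    b[2 * i]     = a[2 * i] + a[2 * i + 1];
    b[2 * i + 1] = a[2 * i] - a[2 * i + 1];
  }
  t[0] = b[0] + b[2]; t[1] = b[1] + b[3];
  t[2] = b[0] - b[2]; t[3] = b[1] - b[3];
  t[4] = b[4] + b[6]; t[5] = b[5] + b[7];
  t[6] = b[4] - b[6]; t[7] = b[5] - b[7];
  a[0] = t[0] + t[4]; a[7] = t[1] + t[5];
  a[3] = t[2] + t[6]; a[4] = t[3] + t[7];
  a[2] = t[0] - t[4]; a[6] = t[1] - t[5];
  a[1] = t[2] - t[6]; a[5] = t[3] - t[7];
}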
void extern
avx2_test (void)
{
  x = _mm256_unpacklo_epi16 (x, x);
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes. */
int64_t bshuf_trans_byte_bitrow_AVX(void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t hh, ii, jj, kk, mm;

    char* in_b = (char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(size);

    size_t nrows = 8 * elem_size;
    size_t nbyte_row = size / 8;

    if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out, size,
            elem_size);

    __m256i ymm_0[8];
    __m256i ymm_1[8];
    __m256i ymm_storeage[8][4];

    for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
        for (ii = 0; ii + 3 < elem_size; ii += 4) {
            for (hh = 0; hh < 4; hh ++) {

                for (kk = 0; kk < 8; kk ++){
                    ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
                            (ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
                }

                for (kk = 0; kk < 4; kk ++){
                    ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                    ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                }

                for (kk = 0; kk < 2; kk ++){
                    for (mm = 0; mm < 2; mm ++){
                        ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
                                ymm_1[kk * 4 + mm * 2],
                                ymm_1[kk * 4 + mm * 2 + 1]);
                        ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
                                ymm_1[kk * 4 + mm * 2],
                                ymm_1[kk * 4 + mm * 2 + 1]);
                    }
                }

                for (kk = 0; kk < 4; kk ++){
                    ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                    ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                }

                for (kk = 0; kk < 8; kk ++){
                    ymm_storeage[kk][hh] = ymm_1[kk];
                }
            }

            for (mm = 0; mm < 8; mm ++) {

                for (kk = 0; kk < 4; kk ++){
                    ymm_0[kk] = ymm_storeage[mm][kk];
                }

                ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
                ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
                ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
                ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);

                ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
                ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
                ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
                ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);

                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
            }
        }
    }

    for (ii = 0; ii < nrows; ii ++ ) {
        for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
            out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
        }
    }
    return size * elem_size;
}
/**
 * \brief quantize transformed coefficients
 *
 */
void kvz_quant_flat_avx2(const encoder_state_t * const state, coeff_t *coef, coeff_t *q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
  const uint32_t * const scan = kvz_g_sig_last_scan[scan_idx][log2_block_size - 1];

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->global->QP, (encoder->bitdepth - 8) * 6);
  const uint32_t log2_tr_size = kvz_g_convert_to_bit[width] + 2;
  const int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
  const int32_t *quant_coeff = encoder->scaling_list.quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
  const int32_t transform_shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size; //!< Represents scaling through forward transform
  const int32_t q_bits = QUANT_SHIFT + qp_scaled / 6 + transform_shift;
  const int32_t add = ((state->global->slicetype == KVZ_SLICE_I) ? 171 : 85) << (q_bits - 9);
  const int32_t q_bits8 = q_bits - 8;

  assert(quant_coeff[0] <= (1 << 15) - 1 && quant_coeff[0] >= -(1 << 15)); // Assuming flat values to fit int16_t

  uint32_t ac_sum = 0;

  __m256i v_ac_sum = _mm256_setzero_si256();
  __m256i v_quant_coeff = _mm256_set1_epi16(quant_coeff[0]);

  for (int32_t n = 0; n < width * height; n += 16) {
    __m256i v_level = _mm256_loadu_si256((__m256i*)&(coef[n]));
    __m256i v_sign = _mm256_cmpgt_epi16(_mm256_setzero_si256(), v_level);
    v_sign = _mm256_or_si256(v_sign, _mm256_set1_epi16(1));

    v_level = _mm256_abs_epi16(v_level);
    __m256i low_a = _mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0));
    __m256i high_a = _mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0));
    __m256i low_b = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i high_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));

    __m256i v_level32_a = _mm256_madd_epi16(low_a, low_b);
    __m256i v_level32_b = _mm256_madd_epi16(high_a, high_b);

    v_level32_a = _mm256_add_epi32(v_level32_a, _mm256_set1_epi32(add));
    v_level32_b = _mm256_add_epi32(v_level32_b, _mm256_set1_epi32(add));

    v_level32_a = _mm256_srai_epi32(v_level32_a, q_bits);
    v_level32_b = _mm256_srai_epi32(v_level32_b, q_bits);

    v_level = _mm256_packs_epi32(v_level32_a, v_level32_b);
    v_level = _mm256_sign_epi16(v_level, v_sign);

    _mm256_storeu_si256((__m256i*)&(q_coef[n]), v_level);

    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_a);
    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_b);
  }

  __m128i temp = _mm_add_epi32(_mm256_castsi256_si128(v_ac_sum), _mm256_extracti128_si256(v_ac_sum, 1));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, KVZ_PERMUTE(2, 3, 0, 1)));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, KVZ_PERMUTE(1, 0, 1, 0)));
  ac_sum += _mm_cvtsi128_si32(temp);

  if (!(encoder->sign_hiding && ac_sum >= 2)) return;

  int32_t delta_u[LCU_WIDTH*LCU_WIDTH >> 2];

  for (int32_t n = 0; n < width * height; n += 16) {
    __m256i v_level = _mm256_loadu_si256((__m256i*)&(coef[n]));

    v_level = _mm256_abs_epi16(v_level);
    __m256i low_a = _mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0));
    __m256i high_a = _mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0));
    __m256i low_b = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i high_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));

    __m256i v_level32_a = _mm256_madd_epi16(low_a, low_b);
    __m256i v_level32_b = _mm256_madd_epi16(high_a, high_b);

    v_level32_a = _mm256_add_epi32(v_level32_a, _mm256_set1_epi32(add));
    v_level32_b = _mm256_add_epi32(v_level32_b, _mm256_set1_epi32(add));

    v_level32_a = _mm256_srai_epi32(v_level32_a, q_bits);
    v_level32_b = _mm256_srai_epi32(v_level32_b, q_bits);

    v_level = _mm256_packs_epi32(v_level32_a, v_level32_b);

    __m256i v_coef = _mm256_loadu_si256((__m256i*)&(coef[n]));
    __m256i v_coef_a = _mm256_unpacklo_epi16(_mm256_abs_epi16(v_coef), _mm256_set1_epi16(0));
    __m256i v_coef_b = _mm256_unpackhi_epi16(_mm256_abs_epi16(v_coef), _mm256_set1_epi16(0));
    __m256i v_quant_coeff_a = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i v_quant_coeff_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));

    v_coef_a = _mm256_madd_epi16(v_coef_a, v_quant_coeff_a);
    v_coef_b = _mm256_madd_epi16(v_coef_b, v_quant_coeff_b);

    v_coef_a = _mm256_sub_epi32(v_coef_a, _mm256_slli_epi32(_mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0)), q_bits));
    v_coef_b = _mm256_sub_epi32(v_coef_b, _mm256_slli_epi32(_mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0)), q_bits));

    v_coef_a = _mm256_srai_epi32(v_coef_a, q_bits8);
    v_coef_b = _mm256_srai_epi32(v_coef_b, q_bits8);

    _mm_storeu_si128((__m128i*)&(delta_u[n + 0 * 4]), _mm256_castsi256_si128(v_coef_a));
    _mm_storeu_si128((__m128i*)&(delta_u[n + 2 * 4]), _mm256_extracti128_si256(v_coef_a, 1));
    _mm_storeu_si128((__m128i*)&(delta_u[n + 1 * 4]), _mm256_castsi256_si128(v_coef_b));
    _mm_storeu_si128((__m128i*)&(delta_u[n + 3 * 4]), _mm256_extracti128_si256(v_coef_b, 1));
  }

  if (ac_sum >= 2) {
#define SCAN_SET_SIZE 16
#define LOG2_SCAN_SET_SIZE 4
    int32_t n, last_cg = -1, abssum = 0, subset, subpos;
    for (subset = (width*height - 1) >> LOG2_SCAN_SET_SIZE; subset >= 0; subset--) {
      int32_t first_nz_pos_in_cg = SCAN_SET_SIZE, last_nz_pos_in_cg = -1;
      subpos = subset << LOG2_SCAN_SET_SIZE;
      abssum = 0;

      // Find last coeff pos
      for (n = SCAN_SET_SIZE - 1; n >= 0; n--) {
        if (q_coef[scan[n + subpos]]) {
          last_nz_pos_in_cg = n;
          break;
        }
      }

      // First coeff pos
      for (n = 0; n < SCAN_SET_SIZE; n++) {
        if (q_coef[scan[n + subpos]]) {
          first_nz_pos_in_cg = n;
          break;
        }
      }

      // Sum all kvz_quant coeffs between first and last
      for (n = first_nz_pos_in_cg; n <= last_nz_pos_in_cg; n++) {
        abssum += q_coef[scan[n + subpos]];
      }

      if (last_nz_pos_in_cg >= 0 && last_cg == -1) {
        last_cg = 1;
      }

      if (last_nz_pos_in_cg - first_nz_pos_in_cg >= 4) {
        int32_t signbit = (q_coef[scan[subpos + first_nz_pos_in_cg]] > 0 ? 0 : 1);
        if (signbit != (abssum & 0x1)) { // compare signbit with sum_parity
          int32_t min_cost_inc = 0x7fffffff, min_pos = -1, cur_cost = 0x7fffffff;
          int16_t final_change = 0, cur_change = 0;

          for (n = (last_cg == 1 ? last_nz_pos_in_cg : SCAN_SET_SIZE - 1); n >= 0; n--) {
            uint32_t blkPos = scan[n + subpos];
            if (q_coef[blkPos] != 0) {
              if (delta_u[blkPos] > 0) {
                cur_cost = -delta_u[blkPos];
                cur_change = 1;
              } else if (n == first_nz_pos_in_cg && abs(q_coef[blkPos]) == 1) {
                cur_cost = 0x7fffffff;
              } else {
                cur_cost = delta_u[blkPos];
                cur_change = -1;
              }
            } else if (n < first_nz_pos_in_cg && ((coef[blkPos] >= 0) ? 0 : 1) != signbit) {
              cur_cost = 0x7fffffff;
            } else {
              cur_cost = -delta_u[blkPos];
              cur_change = 1;
            }

            if (cur_cost < min_cost_inc) {
              min_cost_inc = cur_cost;
              final_change = cur_change;
              min_pos = blkPos;
            }
          } // CG loop

          if (q_coef[min_pos] == 32767 || q_coef[min_pos] == -32768) {
            final_change = -1;
          }

          if (coef[min_pos] >= 0) q_coef[min_pos] += final_change;
          else q_coef[min_pos] -= final_change;
        } // Hide
      }
      if (last_cg == 1) last_cg = 0;
    }

#undef SCAN_SET_SIZE
#undef LOG2_SCAN_SET_SIZE
  }
}
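/* Scalar model of the vectorized quantization core above (a sketch of ours,
   assuming a flat scaling list so quant_coeff[0] applies to every
   coefficient): level = (|coef| * quant_coeff + add) >> q_bits, with the
   original sign restored afterwards, just as _mm256_sign_epi16 does. */
static int16_t quant_one_coeff_model(int16_t coef, int32_t quant_coeff,
                                     int32_t add, int32_t q_bits)
{
  const int32_t abs_coef = coef < 0 ? -coef : coef;
  const int32_t level = (abs_coef * quant_coeff + add) >> q_bits;
  return (int16_t)(coef < 0 ? -level : level);
}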
/* Routine optimized for shuffling a buffer for a type size larger than 16 bytes. */
static void
shuffle16_tiled_avx2(uint8_t* const dest, const uint8_t* const src,
  const size_t vectorizable_elements, const size_t total_elements, const size_t bytesoftype)
{
  size_t j;
  int k, l;
  __m256i ymm0[16], ymm1[16];

  const lldiv_t vecs_per_el = lldiv(bytesoftype, sizeof(__m128i));

  /* Create the shuffle mask.
     NOTE: The XMM/YMM 'set' intrinsics require the arguments to be ordered from
     most to least significant (i.e., their order is reversed when compared to
     loading the mask from an array). */
  const __m256i shmask = _mm256_set_epi8(
    0x0f, 0x07, 0x0e, 0x06, 0x0d, 0x05, 0x0c, 0x04,
    0x0b, 0x03, 0x0a, 0x02, 0x09, 0x01, 0x08, 0x00,
    0x0f, 0x07, 0x0e, 0x06, 0x0d, 0x05, 0x0c, 0x04,
    0x0b, 0x03, 0x0a, 0x02, 0x09, 0x01, 0x08, 0x00);

  for (j = 0; j < vectorizable_elements; j += sizeof(__m256i)) {
    /* Advance the offset into the type by the vector size (in bytes), unless this is
    the initial iteration and the type size is not a multiple of the vector size.
    In that case, only advance by the number of bytes necessary so that the number
    of remaining bytes in the type will be a multiple of the vector size. */
    size_t offset_into_type;
    for (offset_into_type = 0; offset_into_type < bytesoftype;
      offset_into_type += (offset_into_type == 0 && vecs_per_el.rem > 0 ? vecs_per_el.rem : sizeof(__m128i))) {

      /* Fetch elements in groups of 512 bytes */
      const uint8_t* const src_with_offset = src + offset_into_type;
      for (k = 0; k < 16; k++) {
        ymm0[k] = _mm256_loadu2_m128i(
          (__m128i*)(src_with_offset + (j + (2 * k) + 1) * bytesoftype),
          (__m128i*)(src_with_offset + (j + (2 * k)) * bytesoftype));
      }
      /* Transpose bytes */
      for (k = 0, l = 0; k < 8; k++, l +=2) {
        ymm1[k*2] = _mm256_unpacklo_epi8(ymm0[l], ymm0[l+1]);
        ymm1[k*2+1] = _mm256_unpackhi_epi8(ymm0[l], ymm0[l+1]);
      }
      /* Transpose words */
      for (k = 0, l = -2; k < 8; k++, l++) {
        if ((k%2) == 0) l += 2;
        ymm0[k*2] = _mm256_unpacklo_epi16(ymm1[l], ymm1[l+2]);
        ymm0[k*2+1] = _mm256_unpackhi_epi16(ymm1[l], ymm1[l+2]);
      }
      /* Transpose double words */
      for (k = 0, l = -4; k < 8; k++, l++) {
        if ((k%4) == 0) l += 4;
        ymm1[k*2] = _mm256_unpacklo_epi32(ymm0[l], ymm0[l+4]);
        ymm1[k*2+1] = _mm256_unpackhi_epi32(ymm0[l], ymm0[l+4]);
      }
      /* Transpose quad words */
      for (k = 0; k < 8; k++) {
        ymm0[k*2] = _mm256_unpacklo_epi64(ymm1[k], ymm1[k+8]);
        ymm0[k*2+1] = _mm256_unpackhi_epi64(ymm1[k], ymm1[k+8]);
      }
      for (k = 0; k < 16; k++) {
        ymm0[k] = _mm256_permute4x64_epi64(ymm0[k], 0xd8);
        ymm0[k] = _mm256_shuffle_epi8(ymm0[k], shmask);
      }
      /* Store the result vectors */
      uint8_t* const dest_for_jth_element = dest + j;
      for (k = 0; k < 16; k++) {
        _mm256_storeu_si256((__m256i*)(dest_for_jth_element + (total_elements * (offset_into_type + k))), ymm0[k]);
      }
    }
  }
}
/* Routine optimized for unshuffling a buffer for a type size larger than 16 bytes. */
static void
unshuffle16_tiled_avx2(uint8_t* const dest, const uint8_t* const src,
  const size_t vectorizable_elements, const size_t total_elements, const size_t bytesoftype)
{
  size_t i;
  int j;
  __m256i ymm0[16], ymm1[16];

  const lldiv_t vecs_per_el = lldiv(bytesoftype, sizeof(__m128i));

  /* The unshuffle loops are inverted (compared to shuffle16_tiled_avx2)
     to optimize cache utilization. */
  size_t offset_into_type;
  for (offset_into_type = 0; offset_into_type < bytesoftype;
    offset_into_type += (offset_into_type == 0 && vecs_per_el.rem > 0 ? vecs_per_el.rem : sizeof(__m128i))) {
    for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
      /* Load the first 16 bytes of 32 adjacent elements (512 bytes) into 16 YMM registers */
      const uint8_t* const src_for_ith_element = src + i;
      for (j = 0; j < 16; j++) {
        ymm0[j] = _mm256_loadu_si256((__m256i*)(src_for_ith_element + (total_elements * (offset_into_type + j))));
      }

      /* Shuffle bytes */
      for (j = 0; j < 8; j++) {
        /* Compute the low 32 bytes */
        ymm1[j] = _mm256_unpacklo_epi8(ymm0[j*2], ymm0[j*2+1]);
        /* Compute the hi 32 bytes */
        ymm1[8+j] = _mm256_unpackhi_epi8(ymm0[j*2], ymm0[j*2+1]);
      }
      /* Shuffle 2-byte words */
      for (j = 0; j < 8; j++) {
        /* Compute the low 32 bytes */
        ymm0[j] = _mm256_unpacklo_epi16(ymm1[j*2], ymm1[j*2+1]);
        /* Compute the hi 32 bytes */
        ymm0[8+j] = _mm256_unpackhi_epi16(ymm1[j*2], ymm1[j*2+1]);
      }
      /* Shuffle 4-byte dwords */
      for (j = 0; j < 8; j++) {
        /* Compute the low 32 bytes */
        ymm1[j] = _mm256_unpacklo_epi32(ymm0[j*2], ymm0[j*2+1]);
        /* Compute the hi 32 bytes */
        ymm1[8+j] = _mm256_unpackhi_epi32(ymm0[j*2], ymm0[j*2+1]);
      }
      /* Shuffle 8-byte qwords */
      for (j = 0; j < 8; j++) {
        /* Compute the low 32 bytes */
        ymm0[j] = _mm256_unpacklo_epi64(ymm1[j*2], ymm1[j*2+1]);
        /* Compute the hi 32 bytes */
        ymm0[8+j] = _mm256_unpackhi_epi64(ymm1[j*2], ymm1[j*2+1]);
      }

      for (j = 0; j < 8; j++) {
        ymm1[j] = _mm256_permute2x128_si256(ymm0[j], ymm0[j+8], 0x20);
        ymm1[j+8] = _mm256_permute2x128_si256(ymm0[j], ymm0[j+8], 0x31);
      }

      /* Store the result vectors in proper order */
      const uint8_t* const dest_with_offset = dest + offset_into_type;
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x01) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x00) * bytesoftype), ymm1[0]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x03) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x02) * bytesoftype), ymm1[4]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x05) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x04) * bytesoftype), ymm1[2]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x07) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x06) * bytesoftype), ymm1[6]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x09) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x08) * bytesoftype), ymm1[1]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x0b) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x0a) * bytesoftype), ymm1[5]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x0d) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x0c) * bytesoftype), ymm1[3]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x0f) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x0e) * bytesoftype), ymm1[7]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x11) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x10) * bytesoftype), ymm1[8]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x13) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x12) * bytesoftype), ymm1[12]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x15) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x14) * bytesoftype), ymm1[10]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x17) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x16) * bytesoftype), ymm1[14]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x19) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x18) * bytesoftype), ymm1[9]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x1b) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x1a) * bytesoftype), ymm1[13]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x1d) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x1c) * bytesoftype), ymm1[11]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x1f) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x1e) * bytesoftype), ymm1[15]);
    }
  }
}
/* Routine optimized for unshuffling a buffer for a type size of 16 bytes. */
static void
unshuffle16_avx2(uint8_t* const dest, const uint8_t* const src,
  const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 16;
  size_t i;
  int j;
  __m256i ymm0[16], ymm1[16];

  for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
    /* Fetch 32 elements (512 bytes) into 16 YMM registers. */
    const uint8_t* const src_for_ith_element = src + i;
    for (j = 0; j < 16; j++) {
      ymm0[j] = _mm256_loadu_si256((__m256i*)(src_for_ith_element + (j * total_elements)));
    }

    /* Shuffle bytes */
    for (j = 0; j < 8; j++) {
      /* Compute the low 32 bytes */
      ymm1[j] = _mm256_unpacklo_epi8(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm1[8+j] = _mm256_unpackhi_epi8(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Shuffle 2-byte words */
    for (j = 0; j < 8; j++) {
      /* Compute the low 32 bytes */
      ymm0[j] = _mm256_unpacklo_epi16(ymm1[j*2], ymm1[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm0[8+j] = _mm256_unpackhi_epi16(ymm1[j*2], ymm1[j*2+1]);
    }
    /* Shuffle 4-byte dwords */
    for (j = 0; j < 8; j++) {
      /* Compute the low 32 bytes */
      ymm1[j] = _mm256_unpacklo_epi32(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm1[8+j] = _mm256_unpackhi_epi32(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Shuffle 8-byte qwords */
    for (j = 0; j < 8; j++) {
      /* Compute the low 32 bytes */
      ymm0[j] = _mm256_unpacklo_epi64(ymm1[j*2], ymm1[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm0[8+j] = _mm256_unpackhi_epi64(ymm1[j*2], ymm1[j*2+1]);
    }

    for (j = 0; j < 8; j++) {
      ymm1[j] = _mm256_permute2x128_si256(ymm0[j], ymm0[j+8], 0x20);
      ymm1[j+8] = _mm256_permute2x128_si256(ymm0[j], ymm0[j+8], 0x31);
    }

    /* Store the result vectors in proper order */
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (0 * sizeof(__m256i))), ymm1[0]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (1 * sizeof(__m256i))), ymm1[4]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (2 * sizeof(__m256i))), ymm1[2]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (3 * sizeof(__m256i))), ymm1[6]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (4 * sizeof(__m256i))), ymm1[1]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (5 * sizeof(__m256i))), ymm1[5]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (6 * sizeof(__m256i))), ymm1[3]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (7 * sizeof(__m256i))), ymm1[7]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (8 * sizeof(__m256i))), ymm1[8]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (9 * sizeof(__m256i))), ymm1[12]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (10 * sizeof(__m256i))), ymm1[10]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (11 * sizeof(__m256i))), ymm1[14]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (12 * sizeof(__m256i))), ymm1[9]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (13 * sizeof(__m256i))), ymm1[13]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (14 * sizeof(__m256i))), ymm1[11]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (15 * sizeof(__m256i))), ymm1[15]);
  }
}