int32_t dot_product(int16_t *x,
                    int16_t *y,
                    uint32_t N, // must be a multiple of 8
                    uint8_t output_shift)
{
  uint32_t n;

#if defined(__x86_64__) || defined(__i386__)
  __m128i *x128, *y128, mmtmp1, mmtmp2, mmtmp3, mmcumul, mmcumul_re, mmcumul_im;
  __m64 mmtmp7;
  __m128i minus_i = _mm_set_epi16(-1, 1, -1, 1, -1, 1, -1, 1);
  int32_t result;

  x128 = (__m128i*) x;
  y128 = (__m128i*) y;

  mmcumul_re = _mm_setzero_si128();
  mmcumul_im = _mm_setzero_si128();

  for (n = 0; n < (N >> 2); n++) {
    //printf("n=%d, x128=%p, y128=%p\n",n,x128,y128);
    //print_shorts("x",&x128[0]);
    //print_shorts("y",&y128[0]);

    // this computes Re(z) = Re(x)*Re(y) + Im(x)*Im(y)
    mmtmp1 = _mm_madd_epi16(x128[0], y128[0]);
    //print_ints("re",&mmtmp1);
    // mmtmp1 contains real part of 4 consecutive outputs (32-bit)

    // shift and accumulate results
    mmtmp1 = _mm_srai_epi32(mmtmp1, output_shift);
    mmcumul_re = _mm_add_epi32(mmcumul_re, mmtmp1);
    //print_ints("re",&mmcumul_re);

    // this computes Im(z) = Re(x)*Im(y) - Re(y)*Im(x)
    mmtmp2 = _mm_shufflelo_epi16(y128[0], _MM_SHUFFLE(2,3,0,1));
    //print_shorts("y",&mmtmp2);
    mmtmp2 = _mm_shufflehi_epi16(mmtmp2, _MM_SHUFFLE(2,3,0,1));
    //print_shorts("y",&mmtmp2);
    mmtmp2 = _mm_sign_epi16(mmtmp2, minus_i);
    //print_shorts("y",&mmtmp2);
    mmtmp3 = _mm_madd_epi16(x128[0], mmtmp2);
    //print_ints("im",&mmtmp3);
    // mmtmp3 contains imag part of 4 consecutive outputs (32-bit)

    // shift and accumulate results
    mmtmp3 = _mm_srai_epi32(mmtmp3, output_shift);
    mmcumul_im = _mm_add_epi32(mmcumul_im, mmtmp3);
    //print_ints("im",&mmcumul_im);
    x128++;
    y128++;
  }

  // this gives Re Re Im Im
  mmcumul = _mm_hadd_epi32(mmcumul_re, mmcumul_im);
  //print_ints("cumul1",&mmcumul);

  // this gives Re Im Re Im
  mmcumul = _mm_hadd_epi32(mmcumul, mmcumul);
  //print_ints("cumul2",&mmcumul);
  //mmcumul = _mm_srai_epi32(mmcumul,output_shift);

  // extract the lower half
  mmtmp7 = _mm_movepi64_pi64(mmcumul);
  //print_ints("mmtmp7",&mmtmp7);

  // pack the result
  mmtmp7 = _mm_packs_pi32(mmtmp7, mmtmp7);
  //print_shorts("mmtmp7",&mmtmp7);

  // convert back to integer
  result = _mm_cvtsi64_si32(mmtmp7);

  _mm_empty();
  _m_empty();

  return(result);

#elif defined(__arm__)
  int16x4_t *x_128 = (int16x4_t*)x;
  int16x4_t *y_128 = (int16x4_t*)y;
  int32x4_t tmp_re, tmp_im;
  int32x4_t tmp_re1, tmp_im1;
  int32x4_t re_cumul, im_cumul;
  int32x2_t re_cumul2, im_cumul2;
  int32x4_t shift = vdupq_n_s32(-output_shift);
  int32x2x2_t result2;
  int16_t conjug[4] __attribute__((aligned(16))) = {-1, 1, -1, 1};

  re_cumul = vdupq_n_s32(0);
  im_cumul = vdupq_n_s32(0);

  for (n = 0; n < (N >> 2); n++) {
    tmp_re  = vmull_s16(*x_128++, *y_128++);
    //tmp_re  = [Re(x[0])Re(y[0]) Im(x[0])Im(y[0]) Re(x[1])Re(y[1]) Im(x[1])Im(y[1])]
    tmp_re1 = vmull_s16(*x_128++, *y_128++);
    //tmp_re1 = [Re(x[2])Re(y[2]) Im(x[2])Im(y[2]) Re(x[3])Re(y[3]) Im(x[3])Im(y[3])]
    tmp_re  = vcombine_s32(vpadd_s32(vget_low_s32(tmp_re), vget_high_s32(tmp_re)),
                           vpadd_s32(vget_low_s32(tmp_re1), vget_high_s32(tmp_re1)));
    //tmp_re  = [Re(x[0])Re(y[0])+Im(x[0])Im(y[0]) Re(x[1])Re(y[1])+Im(x[1])Im(y[1])
    //           Re(x[2])Re(y[2])+Im(x[2])Im(y[2]) Re(x[3])Re(y[3])+Im(x[3])Im(y[3])]

    tmp_im  = vmull_s16(vrev32_s16(vmul_s16(*x_128++, *(int16x4_t*)conjug)), *y_128++);
    //tmp_im  = [-Im(x[0])Re(y[0]) Re(x[0])Im(y[0]) -Im(x[1])Re(y[1]) Re(x[1])Im(y[1])]
    tmp_im1 = vmull_s16(vrev32_s16(vmul_s16(*x_128++, *(int16x4_t*)conjug)), *y_128++);
    //tmp_im1 = [-Im(x[2])Re(y[2]) Re(x[2])Im(y[2]) -Im(x[3])Re(y[3]) Re(x[3])Im(y[3])]
    tmp_im  = vcombine_s32(vpadd_s32(vget_low_s32(tmp_im), vget_high_s32(tmp_im)),
                           vpadd_s32(vget_low_s32(tmp_im1), vget_high_s32(tmp_im1)));
    //tmp_im  = [-Im(x[0])Re(y[0])+Re(x[0])Im(y[0]) -Im(x[1])Re(y[1])+Re(x[1])Im(y[1])
    //           -Im(x[2])Re(y[2])+Re(x[2])Im(y[2]) -Im(x[3])Re(y[3])+Re(x[3])Im(y[3])]

    re_cumul = vqaddq_s32(re_cumul, vqshlq_s32(tmp_re, shift));
    im_cumul = vqaddq_s32(im_cumul, vqshlq_s32(tmp_im, shift));
  }

  re_cumul2 = vpadd_s32(vget_low_s32(re_cumul), vget_high_s32(re_cumul));
  im_cumul2 = vpadd_s32(vget_low_s32(im_cumul), vget_high_s32(im_cumul));
  re_cumul2 = vpadd_s32(re_cumul2, re_cumul2);
  im_cumul2 = vpadd_s32(im_cumul2, im_cumul2);
  result2   = vzip_s32(re_cumul2, im_cumul2);

  return(vget_lane_s32(result2.val[0], 0));
#endif
}
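/*
 * Hypothetical scalar reference for dot_product, not part of the original
 * source: a minimal sketch assuming N counts complex samples and that the
 * intent of both SIMD paths is sum_n conj(x[n]) * y[n], with every 32-bit
 * partial product shifted right by output_shift before accumulation, as
 * the SSE path does. The saturated 16-bit real and imaginary sums are
 * packed into one 32-bit word (Re in the low half, Im in the high half),
 * mirroring _mm_packs_pi32 / _mm_cvtsi64_si32 above.
 */
#include <stdint.h>

static int32_t dot_product_ref(const int16_t *x, const int16_t *y,
                               uint32_t N, uint8_t output_shift)
{
  int32_t re = 0, im = 0;
  uint32_t n;

  for (n = 0; n < N; n++) {
    int32_t xr = x[2 * n], xi = x[2 * n + 1];
    int32_t yr = y[2 * n], yi = y[2 * n + 1];
    re += (xr * yr + xi * yi) >> output_shift; // Re(conj(x[n]) * y[n])
    im += (xr * yi - xi * yr) >> output_shift; // Im(conj(x[n]) * y[n])
  }

  // Saturate to 16 bits, as _mm_packs_pi32 does.
  if (re > 32767) re = 32767; else if (re < -32768) re = -32768;
  if (im > 32767) im = 32767; else if (im < -32768) im = -32768;

  return (int32_t)(((uint32_t)(uint16_t)im << 16) | (uint16_t)re);
}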
/* Compile-only smoke test for the vrev32_s16 intrinsic's type signature;
 * the argument is deliberately left uninitialized. */
void test_vrev32s16 (void)
{
  int16x4_t out_int16x4_t;
  int16x4_t arg0_int16x4_t;

  out_int16x4_t = vrev32_s16 (arg0_int16x4_t);
}
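/*
 * Illustrative addition, not part of the original test: a runnable check of
 * what vrev32_s16 actually computes. It reverses the two 16-bit elements
 * inside each 32-bit word, so {a, b, c, d} becomes {b, a, d, c} -- exactly
 * the Re/Im swap the complex-multiply routines in this file rely on.
 * Assumes a NEON-capable target.
 */
#include <arm_neon.h>
#include <assert.h>

void test_vrev32s16_values(void)
{
  int16_t in[4] = {1, 2, 3, 4};
  int16_t out[4];

  vst1_s16(out, vrev32_s16(vld1_s16(in)));

  assert(out[0] == 2 && out[1] == 1); // first 32-bit lane swapped
  assert(out[2] == 4 && out[3] == 3); // second 32-bit lane swapped
}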
void WebRtcIsacfix_AllpassFilter2FixDec16Neon(
    int16_t* data_ch1,            // Input and output in channel 1, in Q0
    int16_t* data_ch2,            // Input and output in channel 2, in Q0
    const int16_t* factor_ch1,    // Scaling factor for channel 1, in Q15
    const int16_t* factor_ch2,    // Scaling factor for channel 2, in Q15
    const int length,             // Length of the data buffers
    int32_t* filter_state_ch1,    // Filter state for channel 1, in Q16
    int32_t* filter_state_ch2) {  // Filter state for channel 2, in Q16
  assert(length % 2 == 0);
  int n = 0;
  int16x4_t factorv;
  int16x4_t datav;
  int32x4_t statev;
  int32x2_t tmp;

  // Load factor_ch1 and factor_ch2.
  tmp = vld1_dup_s32((int32_t*)factor_ch1);
  tmp = vld1_lane_s32((int32_t*)factor_ch2, tmp, 1);
  factorv = vreinterpret_s16_s32(tmp);

  // Load filter_state_ch1[0] and filter_state_ch2[0].
  statev = vld1q_dup_s32(filter_state_ch1);
  statev = vld1q_lane_s32(filter_state_ch2, statev, 2);

  // Loop unrolling preprocessing.
  int32x4_t a;
  int16x4_t tmp1, tmp2;

  // Load data_ch1[0] and data_ch2[0].
  datav = vld1_dup_s16(data_ch1);
  datav = vld1_lane_s16(data_ch2, datav, 2);

  a = vqdmlal_s16(statev, datav, factorv);
  tmp1 = vshrn_n_s32(a, 16);

  // Update filter_state_ch1[0] and filter_state_ch2[0].
  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);

  // Load filter_state_ch1[1] and filter_state_ch2[1].
  statev = vld1q_lane_s32(filter_state_ch1 + 1, statev, 1);
  statev = vld1q_lane_s32(filter_state_ch2 + 1, statev, 3);

  // Load data_ch1[1] and data_ch2[1].
  tmp1 = vld1_lane_s16(data_ch1 + 1, tmp1, 1);
  tmp1 = vld1_lane_s16(data_ch2 + 1, tmp1, 3);
  datav = vrev32_s16(tmp1);

  // Loop unrolling processing.
  for (n = 0; n < length - 2; n += 2) {
    a = vqdmlal_s16(statev, datav, factorv);
    tmp1 = vshrn_n_s32(a, 16);
    // Store data_ch1[n] and data_ch2[n].
    vst1_lane_s16(data_ch1 + n, tmp1, 1);
    vst1_lane_s16(data_ch2 + n, tmp1, 3);

    // Update filter_state_ch1[0], filter_state_ch1[1]
    // and filter_state_ch2[0], filter_state_ch2[1].
    statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);

    // Load data_ch1[n + 2] and data_ch2[n + 2].
    tmp1 = vld1_lane_s16(data_ch1 + n + 2, tmp1, 1);
    tmp1 = vld1_lane_s16(data_ch2 + n + 2, tmp1, 3);
    datav = vrev32_s16(tmp1);

    a = vqdmlal_s16(statev, datav, factorv);
    tmp2 = vshrn_n_s32(a, 16);
    // Store data_ch1[n + 1] and data_ch2[n + 1].
    vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
    vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);

    // Update filter_state_ch1[0], filter_state_ch1[1]
    // and filter_state_ch2[0], filter_state_ch2[1].
    statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);

    // Load data_ch1[n + 3] and data_ch2[n + 3].
    tmp2 = vld1_lane_s16(data_ch1 + n + 3, tmp2, 1);
    tmp2 = vld1_lane_s16(data_ch2 + n + 3, tmp2, 3);
    datav = vrev32_s16(tmp2);
  }

  // Loop unrolling post-processing.
  a = vqdmlal_s16(statev, datav, factorv);
  tmp1 = vshrn_n_s32(a, 16);
  // Store data_ch1[n] and data_ch2[n].
  vst1_lane_s16(data_ch1 + n, tmp1, 1);
  vst1_lane_s16(data_ch2 + n, tmp1, 3);

  // Update filter_state_ch1[0], filter_state_ch1[1]
  // and filter_state_ch2[0], filter_state_ch2[1].
  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);

  // Store filter_state_ch1[0] and filter_state_ch2[0].
  vst1q_lane_s32(filter_state_ch1, statev, 0);
  vst1q_lane_s32(filter_state_ch2, statev, 2);

  datav = vrev32_s16(tmp1);
  a = vqdmlal_s16(statev, datav, factorv);
  tmp2 = vshrn_n_s32(a, 16);
  // Store data_ch1[n + 1] and data_ch2[n + 1].
  vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
  vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);

  // Update filter_state_ch1[1] and filter_state_ch2[1].
  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);

  // Store filter_state_ch1[1] and filter_state_ch2[1].
  vst1q_lane_s32(filter_state_ch1 + 1, statev, 1);
  vst1q_lane_s32(filter_state_ch2 + 1, statev, 3);
}
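/*
 * Hypothetical scalar sketch of the arithmetic the NEON routine above
 * performs for a single channel, not part of the original source. Each
 * channel runs a cascade of two first-order allpass sections; vqdmlal_s16
 * and vqdmlsl_s16 double their Q15 x Q0 products, which is what turns
 * factor * data into Q16 to match the filter state. The saturation of the
 * NEON saturating ops is omitted for clarity.
 */
#include <stdint.h>

static void allpass2_scalar_ref(int16_t* data,          // in/out, Q0
                                const int16_t* factor,  // two Q15 coefficients
                                int length,
                                int32_t* state) {       // two Q16 states
  int n;
  for (n = 0; n < length; n++) {
    // Section 1: Q16 accumulator = Q16 state + 2 * (Q15 factor * Q0 input).
    int32_t a0 = state[0] + 2 * factor[0] * data[n];
    int16_t out0 = (int16_t)(a0 >> 16);                 // like vshrn_n_s32(a, 16)
    state[0] = ((int32_t)data[n] << 16) - 2 * factor[0] * out0;

    // Section 2: same update, fed by the first section's output.
    int32_t a1 = state[1] + 2 * factor[1] * out0;
    int16_t out1 = (int16_t)(a1 >> 16);
    state[1] = ((int32_t)out0 << 16) - 2 * factor[1] * out1;

    data[n] = out1;
  }
}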
int mult_cpx_conj_vector(int16_t *x1,
                         int16_t *x2,
                         int16_t *y,
                         uint32_t N,
                         int output_shift,
                         int madd)
{
  // Multiply elementwise the complex conjugate of x1 with x2.
  // x1 - input 1 in the format |Re0 Im0 Re1 Im1|,......,|Re(N-2) Im(N-2) Re(N-1) Im(N-1)|
  //      We assume x1 with a dynamic of 15 bit maximum
  //
  // x2 - input 2 in the format |Re0 Im0 Re1 Im1|,......,|Re(N-2) Im(N-2) Re(N-1) Im(N-1)|
  //      We assume x2 with a dynamic of 14 bit maximum
  //
  // y  - output in the format |Re0 Im0 Re1 Im1|,......,|Re(N-2) Im(N-2) Re(N-1) Im(N-1)|
  //
  // N  - the size of the vectors (this function does N cpx mpy). WARNING: N >= 4;
  //
  // output_shift - shift to be applied to generate output
  //
  // madd - add the output to y

  uint32_t i;                 // loop counter

  simd_q15_t *x1_128;
  simd_q15_t *x2_128;
  simd_q15_t *y_128;
#if defined(__x86_64__) || defined(__i386__)
  simd_q15_t tmp_re, tmp_im;
  simd_q15_t tmpy0, tmpy1;
#elif defined(__arm__)
  int32x4_t tmp_re, tmp_im;
  int32x4_t tmp_re1, tmp_im1;
  int16x4x2_t tmpy;
  int32x4_t shift = vdupq_n_s32(-output_shift);
#endif

  x1_128 = (simd_q15_t *)&x1[0];
  x2_128 = (simd_q15_t *)&x2[0];
  y_128  = (simd_q15_t *)&y[0];

  // we compute 4 cpx multiply for each loop
  for (i = 0; i < (N >> 2); i++) {
#if defined(__x86_64__) || defined(__i386__)
    tmp_re = _mm_madd_epi16(*x1_128, *x2_128);
    tmp_im = _mm_shufflelo_epi16(*x1_128, _MM_SHUFFLE(2,3,0,1));
    tmp_im = _mm_shufflehi_epi16(tmp_im, _MM_SHUFFLE(2,3,0,1));
    // conjug is assumed to be the file-scope constant {-1,1,-1,1,...}
    // from the original source file.
    tmp_im = _mm_sign_epi16(tmp_im, *(__m128i*)&conjug[0]);
    tmp_im = _mm_madd_epi16(tmp_im, *x2_128);
    tmp_re = _mm_srai_epi32(tmp_re, output_shift);
    tmp_im = _mm_srai_epi32(tmp_im, output_shift);
    tmpy0 = _mm_unpacklo_epi32(tmp_re, tmp_im);
    tmpy1 = _mm_unpackhi_epi32(tmp_re, tmp_im);

    if (madd == 0)
      *y_128 = _mm_packs_epi32(tmpy0, tmpy1);
    else
      *y_128 += _mm_packs_epi32(tmpy0, tmpy1);
#elif defined(__arm__)
    tmp_re  = vmull_s16(((simdshort_q15_t *)x1_128)[0], ((simdshort_q15_t*)x2_128)[0]);
    //tmp_re  = [Re(x1[0])Re(x2[0]) Im(x1[0])Im(x2[0]) Re(x1[1])Re(x2[1]) Im(x1[1])Im(x2[1])]
    tmp_re1 = vmull_s16(((simdshort_q15_t *)x1_128)[1], ((simdshort_q15_t*)x2_128)[1]);
    //tmp_re1 = [Re(x1[2])Re(x2[2]) Im(x1[2])Im(x2[2]) Re(x1[3])Re(x2[3]) Im(x1[3])Im(x2[3])]
    tmp_re  = vcombine_s32(vpadd_s32(vget_low_s32(tmp_re), vget_high_s32(tmp_re)),
                           vpadd_s32(vget_low_s32(tmp_re1), vget_high_s32(tmp_re1)));
    //tmp_re  = [Re(x1[0])Re(x2[0])+Im(x1[0])Im(x2[0]) Re(x1[1])Re(x2[1])+Im(x1[1])Im(x2[1])
    //           Re(x1[2])Re(x2[2])+Im(x1[2])Im(x2[2]) Re(x1[3])Re(x2[3])+Im(x1[3])Im(x2[3])]

    tmp_im  = vmull_s16(vrev32_s16(vmul_s16(((simdshort_q15_t*)x2_128)[0], *(simdshort_q15_t*)conjug)),
                        ((simdshort_q15_t*)x1_128)[0]);
    //tmp_im  = [-Im(x1[0])Re(x2[0]) Re(x1[0])Im(x2[0]) -Im(x1[1])Re(x2[1]) Re(x1[1])Im(x2[1])]
    tmp_im1 = vmull_s16(vrev32_s16(vmul_s16(((simdshort_q15_t*)x2_128)[1], *(simdshort_q15_t*)conjug)),
                        ((simdshort_q15_t*)x1_128)[1]);
    //tmp_im1 = [-Im(x1[2])Re(x2[2]) Re(x1[2])Im(x2[2]) -Im(x1[3])Re(x2[3]) Re(x1[3])Im(x2[3])]
    tmp_im  = vcombine_s32(vpadd_s32(vget_low_s32(tmp_im), vget_high_s32(tmp_im)),
                           vpadd_s32(vget_low_s32(tmp_im1), vget_high_s32(tmp_im1)));
    //tmp_im  = [-Im(x1[0])Re(x2[0])+Re(x1[0])Im(x2[0]) -Im(x1[1])Re(x2[1])+Re(x1[1])Im(x2[1])
    //           -Im(x1[2])Re(x2[2])+Re(x1[2])Im(x2[2]) -Im(x1[3])Re(x2[3])+Re(x1[3])Im(x2[3])]

    tmp_re = vqshlq_s32(tmp_re, shift);
    tmp_im = vqshlq_s32(tmp_im, shift);
    tmpy   = vzip_s16(vmovn_s32(tmp_re), vmovn_s32(tmp_im));

    if (madd == 0)
      *y_128 = vcombine_s16(tmpy.val[0], tmpy.val[1]);
    else
      *y_128 += vcombine_s16(tmpy.val[0], tmpy.val[1]);
#endif
    x1_128++;
    x2_128++;
    y_128++;
  }

  _mm_empty();
  _m_empty();

  return(0);
}
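/*
 * Hypothetical scalar reference for mult_cpx_conj_vector, not part of the
 * original source: y[i] = (or +=, when madd != 0) conj(x1[i]) * x2[i],
 * right-shifted by output_shift and saturated to 16 bits as the
 * _mm_packs_epi32 / vqshlq_s32 paths above do. Assumes N >= 4 and a
 * multiple of 4, as the SIMD loop requires.
 */
#include <stdint.h>

static int16_t sat16(int32_t v)
{
  return (int16_t)(v > 32767 ? 32767 : (v < -32768 ? -32768 : v));
}

static int mult_cpx_conj_vector_ref(const int16_t *x1, const int16_t *x2,
                                    int16_t *y, uint32_t N,
                                    int output_shift, int madd)
{
  uint32_t i;
  for (i = 0; i < N; i++) {
    int32_t re = ((int32_t)x1[2*i] * x2[2*i] +
                  (int32_t)x1[2*i + 1] * x2[2*i + 1]) >> output_shift;
    int32_t im = ((int32_t)x1[2*i] * x2[2*i + 1] -
                  (int32_t)x1[2*i + 1] * x2[2*i]) >> output_shift;
    if (madd == 0) {
      y[2*i]     = sat16(re);
      y[2*i + 1] = sat16(im);
    } else {  // accumulate into y, as the madd != 0 branch above does
      y[2*i]     += sat16(re);
      y[2*i + 1] += sat16(im);
    }
  }
  return 0;
}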