void idct_dequant_0_2x_neon(int16_t *q, int16_t dq, unsigned char *dst,
                            int stride) {
  unsigned char *dst0;
  int i, a0, a1;
  int16x8x2_t q2Add;
  // Initialize so the first lane loads do not merge into an undefined vector.
  int32x2_t d2s32 = vdup_n_s32(0), d4s32 = vdup_n_s32(0);
  uint8x8_t d2u8, d4u8;
  uint16x8_t q1u16, q2u16;

  a0 = ((q[0] * dq) + 4) >> 3;
  a1 = ((q[16] * dq) + 4) >> 3;
  q[0] = q[16] = 0;
  q2Add.val[0] = vdupq_n_s16((int16_t)a0);
  q2Add.val[1] = vdupq_n_s16((int16_t)a1);

  for (i = 0; i < 2; i++, dst += 4) {
    // Load four 4-byte rows of dst, one 32-bit lane per row.
    dst0 = dst;
    d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0);
    dst0 += stride;
    d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1);
    dst0 += stride;
    d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 0);
    dst0 += stride;
    d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1);

    // Add the DC value and saturate back to unsigned 8-bit pixels.
    q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
                     vreinterpret_u8_s32(d2s32));
    q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
                     vreinterpret_u8_s32(d4s32));

    d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
    d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));

    d2s32 = vreinterpret_s32_u8(d2u8);
    d4s32 = vreinterpret_s32_u8(d4u8);

    // Store the four rows back, one 32-bit lane per row.
    dst0 = dst;
    vst1_lane_s32((int32_t *)dst0, d2s32, 0);
    dst0 += stride;
    vst1_lane_s32((int32_t *)dst0, d2s32, 1);
    dst0 += stride;
    vst1_lane_s32((int32_t *)dst0, d4s32, 0);
    dst0 += stride;
    vst1_lane_s32((int32_t *)dst0, d4s32, 1);
  }
  return;
}
// Compile-time check of the vld1_lane_s32 prototype; it reads through a null
// pointer and an uninitialized vector, so it is not meant to execute.
void test_vld1_lanes32 (void)
{
  int32x2_t out_int32x2_t;
  int32x2_t arg1_int32x2_t;

  out_int32x2_t = vld1_lane_s32 (0, arg1_int32x2_t, 1);
}
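// Since the test above is compile-only, here is a minimal runnable sketch of
// the same load/store lane pair. The function name and buffers are
// hypothetical, not part of any of the original sources.
#include <arm_neon.h>

void example_vld1_lane_s32_roundtrip(void) {
  int32_t src[2] = { 100, 200 };
  int32_t dst[2] = { 0, 0 };
  int32x2_t v = vdup_n_s32(0);
  v = vld1_lane_s32(&src[0], v, 0);  // lane 0 <- src[0]
  v = vld1_lane_s32(&src[1], v, 1);  // lane 1 <- src[1]
  vst1_lane_s32(&dst[0], v, 0);      // dst[0] <- lane 0
  vst1_lane_s32(&dst[1], v, 1);      // dst[1] <- lane 1
}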
void test_ld1st1(int8x8_t small, int8x16_t big, void *addr) {
  vld1_lane_s8(addr, small, 7);
  vld1_lane_s16(addr, small, 3);
  vld1_lane_s32(addr, small, 1);
  vld1_lane_s64(addr, small, 0);

  vld1q_lane_s8(addr, big, 15);
  vld1q_lane_s16(addr, big, 7);
  vld1q_lane_s32(addr, big, 3);
  vld1q_lane_s64(addr, big, 1);

  vld1_lane_s8(addr, small, 8);    // expected-error {{argument should be a value from 0 to 7}}
  vld1_lane_s16(addr, small, 4);   // expected-error {{argument should be a value from 0 to 3}}
  vld1_lane_s32(addr, small, 2);   // expected-error {{argument should be a value from 0 to 1}}
  vld1_lane_s64(addr, small, 1);   // expected-error {{argument should be a value from 0 to 0}}

  vld1q_lane_s8(addr, big, 16);    // expected-error {{argument should be a value from 0 to 15}}
  vld1q_lane_s16(addr, big, 8);    // expected-error {{argument should be a value from 0 to 7}}
  vld1q_lane_s32(addr, big, 4);    // expected-error {{argument should be a value from 0 to 3}}
  vld1q_lane_s64(addr, big, 2);    // expected-error {{argument should be a value from 0 to 1}}

  vst1_lane_s8(addr, small, 7);
  vst1_lane_s16(addr, small, 3);
  vst1_lane_s32(addr, small, 1);
  vst1_lane_s64(addr, small, 0);

  vst1q_lane_s8(addr, big, 15);
  vst1q_lane_s16(addr, big, 7);
  vst1q_lane_s32(addr, big, 3);
  vst1q_lane_s64(addr, big, 1);

  vst1_lane_s8(addr, small, 8);    // expected-error {{argument should be a value from 0 to 7}}
  vst1_lane_s16(addr, small, 4);   // expected-error {{argument should be a value from 0 to 3}}
  vst1_lane_s32(addr, small, 2);   // expected-error {{argument should be a value from 0 to 1}}
  vst1_lane_s64(addr, small, 1);   // expected-error {{argument should be a value from 0 to 0}}

  vst1q_lane_s8(addr, big, 16);    // expected-error {{argument should be a value from 0 to 15}}
  vst1q_lane_s16(addr, big, 8);    // expected-error {{argument should be a value from 0 to 7}}
  vst1q_lane_s32(addr, big, 4);    // expected-error {{argument should be a value from 0 to 3}}
  vst1q_lane_s64(addr, big, 2);    // expected-error {{argument should be a value from 0 to 1}}
}
void WebRtcIsacfix_AllpassFilter2FixDec16Neon(
    int16_t* data_ch1,            // Input and output in channel 1, in Q0
    int16_t* data_ch2,            // Input and output in channel 2, in Q0
    const int16_t* factor_ch1,    // Scaling factor for channel 1, in Q15
    const int16_t* factor_ch2,    // Scaling factor for channel 2, in Q15
    const int length,             // Length of the data buffers
    int32_t* filter_state_ch1,    // Filter state for channel 1, in Q16
    int32_t* filter_state_ch2) {  // Filter state for channel 2, in Q16
  assert(length % 2 == 0);
  int n = 0;
  int16x4_t factorv;
  int16x4_t datav;
  int32x4_t statev;
  int32x2_t tmp;

  // Load factor_ch1 and factor_ch2.
  tmp = vld1_dup_s32((int32_t*)factor_ch1);
  tmp = vld1_lane_s32((int32_t*)factor_ch2, tmp, 1);
  factorv = vreinterpret_s16_s32(tmp);

  // Load filter_state_ch1[0] and filter_state_ch2[0].
  statev = vld1q_dup_s32(filter_state_ch1);
  statev = vld1q_lane_s32(filter_state_ch2, statev, 2);

  // Loop unrolling preprocessing.
  int32x4_t a;
  int16x4_t tmp1, tmp2;

  // Load data_ch1[0] and data_ch2[0].
  datav = vld1_dup_s16(data_ch1);
  datav = vld1_lane_s16(data_ch2, datav, 2);

  a = vqdmlal_s16(statev, datav, factorv);
  tmp1 = vshrn_n_s32(a, 16);

  // Update filter_state_ch1[0] and filter_state_ch2[0].
  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);

  // Load filter_state_ch1[1] and filter_state_ch2[1].
  statev = vld1q_lane_s32(filter_state_ch1 + 1, statev, 1);
  statev = vld1q_lane_s32(filter_state_ch2 + 1, statev, 3);

  // Load data_ch1[1] and data_ch2[1].
  tmp1 = vld1_lane_s16(data_ch1 + 1, tmp1, 1);
  tmp1 = vld1_lane_s16(data_ch2 + 1, tmp1, 3);
  datav = vrev32_s16(tmp1);

  // Loop unrolling processing.
  for (n = 0; n < length - 2; n += 2) {
    a = vqdmlal_s16(statev, datav, factorv);
    tmp1 = vshrn_n_s32(a, 16);
    // Store data_ch1[n] and data_ch2[n].
    vst1_lane_s16(data_ch1 + n, tmp1, 1);
    vst1_lane_s16(data_ch2 + n, tmp1, 3);

    // Update filter_state_ch1[0], filter_state_ch1[1]
    // and filter_state_ch2[0], filter_state_ch2[1].
    statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);

    // Load data_ch1[n + 2] and data_ch2[n + 2].
    tmp1 = vld1_lane_s16(data_ch1 + n + 2, tmp1, 1);
    tmp1 = vld1_lane_s16(data_ch2 + n + 2, tmp1, 3);
    datav = vrev32_s16(tmp1);

    a = vqdmlal_s16(statev, datav, factorv);
    tmp2 = vshrn_n_s32(a, 16);
    // Store data_ch1[n + 1] and data_ch2[n + 1].
    vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
    vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);

    // Update filter_state_ch1[0], filter_state_ch1[1]
    // and filter_state_ch2[0], filter_state_ch2[1].
    statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);

    // Load data_ch1[n + 3] and data_ch2[n + 3].
    tmp2 = vld1_lane_s16(data_ch1 + n + 3, tmp2, 1);
    tmp2 = vld1_lane_s16(data_ch2 + n + 3, tmp2, 3);
    datav = vrev32_s16(tmp2);
  }

  // Loop unrolling post-processing.
  a = vqdmlal_s16(statev, datav, factorv);
  tmp1 = vshrn_n_s32(a, 16);
  // Store data_ch1[n] and data_ch2[n].
  vst1_lane_s16(data_ch1 + n, tmp1, 1);
  vst1_lane_s16(data_ch2 + n, tmp1, 3);

  // Update filter_state_ch1[0], filter_state_ch1[1]
  // and filter_state_ch2[0], filter_state_ch2[1].
  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);

  // Store filter_state_ch1[0] and filter_state_ch2[0].
  vst1q_lane_s32(filter_state_ch1, statev, 0);
  vst1q_lane_s32(filter_state_ch2, statev, 2);

  datav = vrev32_s16(tmp1);
  a = vqdmlal_s16(statev, datav, factorv);
  tmp2 = vshrn_n_s32(a, 16);
  // Store data_ch1[n + 1] and data_ch2[n + 1].
  vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
  vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);

  // Update filter_state_ch1[1] and filter_state_ch2[1].
  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);

  // Store filter_state_ch1[1] and filter_state_ch2[1].
  vst1q_lane_s32(filter_state_ch1 + 1, statev, 1);
  vst1q_lane_s32(filter_state_ch2 + 1, statev, 3);
}
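// A usage sketch for the filter above, derived from the signature only: each
// factor pointer is read as two Q15 taps (via a 32-bit dup load) and each
// state pointer as two Q16 words. Function name and sample values are made up.
#include <arm_neon.h>

void example_allpass_call(void) {
  int16_t ch1[4] = { 1000, -1000, 500, -500 };  // channel 1 samples, Q0
  int16_t ch2[4] = { 200, 400, -200, -400 };    // channel 2 samples, Q0
  const int16_t factor1[2] = { 3000, 4000 };    // channel 1 taps, Q15
  const int16_t factor2[2] = { 5000, 6000 };    // channel 2 taps, Q15
  int32_t state1[2] = { 0, 0 };                 // channel 1 state, Q16
  int32_t state2[2] = { 0, 0 };                 // channel 2 state, Q16
  // length must be even; the function asserts this.
  WebRtcIsacfix_AllpassFilter2FixDec16Neon(ch1, ch2, factor1, factor2, 4,
                                           state1, state2);
}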
void idct_dequant_full_2x_neon(int16_t *q, int16_t *dq, unsigned char *dst,
                               int stride) {
  unsigned char *dst0, *dst1;
  int32x2_t d28, d29, d30, d31;
  int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
  int16x8_t qEmpty = vdupq_n_s16(0);
  int32x4x2_t q2tmp0, q2tmp1;
  int16x8x2_t q2tmp2, q2tmp3;
  int16x4_t dLow0, dLow1, dHigh0, dHigh1;

  d28 = d29 = d30 = d31 = vdup_n_s32(0);

  // load dq
  q0 = vld1q_s16(dq);
  dq += 8;
  q1 = vld1q_s16(dq);

  // load q
  q2 = vld1q_s16(q);
  vst1q_s16(q, qEmpty);
  q += 8;
  q3 = vld1q_s16(q);
  vst1q_s16(q, qEmpty);
  q += 8;
  q4 = vld1q_s16(q);
  vst1q_s16(q, qEmpty);
  q += 8;
  q5 = vld1q_s16(q);
  vst1q_s16(q, qEmpty);

  // load src from dst
  dst0 = dst;
  dst1 = dst + 4;
  d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0);
  dst0 += stride;
  d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1);
  dst1 += stride;
  d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0);
  dst0 += stride;
  d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1);
  dst1 += stride;
  d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0);
  dst0 += stride;
  d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1);
  dst1 += stride;
  d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0);
  d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1);

  q2 = vmulq_s16(q2, q0);
  q3 = vmulq_s16(q3, q1);
  q4 = vmulq_s16(q4, q0);
  q5 = vmulq_s16(q5, q1);

  // vswp
  dLow0 = vget_low_s16(q2);
  dHigh0 = vget_high_s16(q2);
  dLow1 = vget_low_s16(q4);
  dHigh1 = vget_high_s16(q4);
  q2 = vcombine_s16(dLow0, dLow1);
  q4 = vcombine_s16(dHigh0, dHigh1);

  dLow0 = vget_low_s16(q3);
  dHigh0 = vget_high_s16(q3);
  dLow1 = vget_low_s16(q5);
  dHigh1 = vget_high_s16(q5);
  q3 = vcombine_s16(dLow0, dLow1);
  q5 = vcombine_s16(dHigh0, dHigh1);

  q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);
  q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
  q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);
  q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);

  q10 = vqaddq_s16(q2, q3);
  q11 = vqsubq_s16(q2, q3);

  q8 = vshrq_n_s16(q8, 1);
  q9 = vshrq_n_s16(q9, 1);

  q4 = vqaddq_s16(q4, q8);
  q5 = vqaddq_s16(q5, q9);

  q2 = vqsubq_s16(q6, q5);
  q3 = vqaddq_s16(q7, q4);

  q4 = vqaddq_s16(q10, q3);
  q5 = vqaddq_s16(q11, q2);
  q6 = vqsubq_s16(q11, q2);
  q7 = vqsubq_s16(q10, q3);

  q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
  q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
  q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
                     vreinterpretq_s16_s32(q2tmp1.val[0]));
  q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
                     vreinterpretq_s16_s32(q2tmp1.val[1]));

  // loop 2
  q8 = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2);
  q9 = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2);
  q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1);
  q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1);

  q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]);
  q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]);

  q10 = vshrq_n_s16(q10, 1);
  q11 = vshrq_n_s16(q11, 1);

  q10 = vqaddq_s16(q2tmp2.val[1], q10);
  q11 = vqaddq_s16(q2tmp3.val[1], q11);

  q8 = vqsubq_s16(q8, q11);
  q9 = vqaddq_s16(q9, q10);

  q4 = vqaddq_s16(q2, q9);
  q5 = vqaddq_s16(q3, q8);
  q6 = vqsubq_s16(q3, q8);
  q7 = vqsubq_s16(q2, q9);

  q4 = vrshrq_n_s16(q4, 3);
  q5 = vrshrq_n_s16(q5, 3);
  q6 = vrshrq_n_s16(q6, 3);
  q7 = vrshrq_n_s16(q7, 3);

  q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
  q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
  q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
                     vreinterpretq_s16_s32(q2tmp1.val[0]));
  q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
                     vreinterpretq_s16_s32(q2tmp1.val[1]));

  q4 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]),
                                      vreinterpret_u8_s32(d28)));
  q5 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]),
                                      vreinterpret_u8_s32(d29)));
  q6 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]),
                                      vreinterpret_u8_s32(d30)));
  q7 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]),
                                      vreinterpret_u8_s32(d31)));

  d28 = vreinterpret_s32_u8(vqmovun_s16(q4));
  d29 = vreinterpret_s32_u8(vqmovun_s16(q5));
  d30 = vreinterpret_s32_u8(vqmovun_s16(q6));
  d31 = vreinterpret_s32_u8(vqmovun_s16(q7));

  dst0 = dst;
  dst1 = dst + 4;
  vst1_lane_s32((int32_t *)dst0, d28, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst1, d28, 1);
  dst1 += stride;
  vst1_lane_s32((int32_t *)dst0, d29, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst1, d29, 1);
  dst1 += stride;
  vst1_lane_s32((int32_t *)dst0, d30, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst1, d30, 1);
  dst1 += stride;
  vst1_lane_s32((int32_t *)dst0, d31, 0);
  vst1_lane_s32((int32_t *)dst1, d31, 1);
  return;
}
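// idct_dequant_full_2x_neon relies on two IDCT constants defined elsewhere in
// its source file. A sketch of what the example assumes, using the standard
// VP8 values for cos(pi/8)*sqrt(2)-1 and sin(pi/8)*sqrt(2) in Q16/Q15:
static const int16_t cospi8sqrt2minus1 = 20091;
static const int16_t sinpi8sqrt2 = 35468;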