/* Compile-time exercise of the vgetq_lane_s16 intrinsic (extract lane 1 of
 * an int16x8_t).  The vector is zero-initialised because the original read
 * an uninitialised automatic variable, which is undefined behaviour; the
 * extracted value is consumed so the compiler cannot discard the lane read
 * or warn about an unused result. */
void test_vgetQ_lanes16 (void)
{
  int16_t out_int16_t;
  int16x8_t arg0_int16x8_t = vdupq_n_s16 (0);

  out_int16_t = vgetq_lane_s16 (arg0_int16x8_t, 1);
  (void) out_int16_t;  /* value intentionally unused beyond this point */
}
/* s16x8 mv mul */
/* Matrix-vector multiply for 16-bit ints using NEON.
 *
 * Accumulates, for each output lane m of the 8-row slice starting at `row`,
 * the products A[col*T + row + lane*Row + m] * B[col + lane] — exactly the
 * access pattern of the original fully-unrolled version, expressed as a
 * lane loop.  Broadcasting B[col + lane] via vdupq_n_s16 reads the same
 * memory the original's vld1q_s16 + vgetq_lane_s16 pair did.
 *
 * NOTE(review): the A base index is col * T + row but the lane stride is
 * Row — presumably the layout assumes Row == T; confirm with callers. */
void mw_neon_mv_mul_s16x8(short * A, int Row, int T, short * B, short * C)
{
	int row, col, lane;

	for (row = 0; row < Row; row += 8) {
		int16x8_t acc = vmovq_n_s16(0);

		for (col = 0; col < T; col += 8) {
			int base = col * T + row;

			for (lane = 0; lane < 8; lane++) {
				int16x8_t a_col = vld1q_s16(A + base + lane * Row);
				int16x8_t b_dup = vdupq_n_s16(B[col + lane]);
				acc = vaddq_s16(vmulq_s16(a_col, b_dup), acc);
			}
		}
		vst1q_s16(C + row, acc);
	}
}
/* FileCheck codegen test: extracting lane 6 of an int16x8_t must lower to a
 * single SMOV from the .h[6] sub-register; the +1 forces the scalar result
 * to actually be materialised in a general-purpose register. */
int32_t test_vgetq_lane_s16(int16x8_t v1) {
  // CHECK: test_vgetq_lane_s16
  return vgetq_lane_s16(v1, 6)+1;
  // CHECK: smov {{w[0-9]+}}, {{v[0-9]+}}.h[6]
}
/* Horizontal pass of the inverse 2-D DWT for one RemoteFX subband, NEON
 * version.  For each of subband_width rows:
 *   even output: dst[2n]     = l[n] - ((h[n-1] + h[n] + 1) >> 1)
 *                (computed in place into l; the n == 0 lane mirrors h[0]
 *                in place of the out-of-range h[-1])
 *   odd output:  dst[2n + 1] = (h[n] << 1) + ((dst[2n] + dst[2n + 2]) >> 1)
 *                (the final lane mirrors the last even coefficient in place
 *                of the out-of-range dst[2n + 2])
 * Even/odd pairs are interleaved into dst with vst2q_s16.
 *
 * Fix over the original: the function had no declared return type (implicit
 * int, invalid since C99); it returns nothing, so it is now `void`.
 *
 * NOTE(review): n == 0 reads h_ptr - 1, one element before the h buffer;
 * the loaded lane is overwritten before use, but the load itself is still
 * out of bounds — presumably tolerated by the surrounding allocation;
 * confirm against the caller's buffer layout. */
void
rfx_dwt_2d_decode_block_horiz_NEON(INT16 * l, INT16 * h, INT16 * dst, int subband_width)
{
	int y, n;
	INT16 * l_ptr = l;
	INT16 * h_ptr = h;
	INT16 * dst_ptr = dst;

	for (y = 0; y < subband_width; y++) {
		/* Even coefficients */
		for (n = 0; n < subband_width; n+=8) {
			// dst[2n] = l[n] - ((h[n-1] + h[n] + 1) >> 1);
			int16x8_t l_n = vld1q_s16(l_ptr);
			int16x8_t h_n = vld1q_s16(h_ptr);
			int16x8_t h_n_m = vld1q_s16(h_ptr - 1);
			if (n == 0) {
				/* No h[-1]: mirror h[0] into lane 0. */
				int16_t first = vgetq_lane_s16(h_n_m, 1);
				h_n_m = vsetq_lane_s16(first, h_n_m, 0);
			}
			int16x8_t tmp_n = vaddq_s16(h_n, h_n_m);
			tmp_n = vaddq_s16(tmp_n, vdupq_n_s16(1));
			tmp_n = vshrq_n_s16(tmp_n, 1);
			int16x8_t dst_n = vsubq_s16(l_n, tmp_n);
			/* Even coefficients are written back over l for reuse below. */
			vst1q_s16(l_ptr, dst_n);
			l_ptr+=8;
			h_ptr+=8;
		}
		l_ptr -= subband_width;
		h_ptr -= subband_width;
		/* Odd coefficients */
		for (n = 0; n < subband_width; n+=8) {
			// dst[2n + 1] = (h[n] << 1) + ((dst[2n] + dst[2n + 2]) >> 1);
			int16x8_t h_n = vld1q_s16(h_ptr);
			h_n = vshlq_n_s16(h_n, 1);
			int16x8x2_t dst_n;
			dst_n.val[0] = vld1q_s16(l_ptr);
			int16x8_t dst_n_p = vld1q_s16(l_ptr + 1);
			if (n == subband_width - 8) {
				/* No dst[2n + 2] past the row: mirror the previous lane. */
				int16_t last = vgetq_lane_s16(dst_n_p, 6);
				dst_n_p = vsetq_lane_s16(last, dst_n_p, 7);
			}
			dst_n.val[1] = vaddq_s16(dst_n_p, dst_n.val[0]);
			dst_n.val[1] = vshrq_n_s16(dst_n.val[1], 1);
			dst_n.val[1] = vaddq_s16(dst_n.val[1], h_n);
			/* Interleave even/odd pairs into the destination row. */
			vst2q_s16(dst_ptr, dst_n);
			l_ptr+=8;
			h_ptr+=8;
			dst_ptr+=16;
		}
	}
}
/* FileCheck codegen test: lane 7 extraction of an int16x8_t must lower to a
 * single UMOV from v0[7] followed by ret (no sign extension needed because
 * the full 16-bit lane is returned as int16_t). */
int16_t test_vgetq_lane_s16(int16x8_t a) {
  // CHECK-LABEL: test_vgetq_lane_s16:
  // CHECK-NEXT: umov.h w0, v0[7]
  // CHECK-NEXT: ret
  return vgetq_lane_s16(a, 7);
}
/* s16x8 mm mul */
/* Matrix-matrix multiply for 16-bit ints using NEON: for each output column
 * k and each 8-row slice starting at i, lane m of the accumulator gathers
 * A[j*T + i + lane*Row + m] * B[k*Row + j + lane] over all j, lane — the
 * same access pattern as the original fully-unrolled version.
 *
 * Fixes over the original:
 *  - the eight vst1q_lane_s16 stores of the accumulator ran on every
 *    iteration of the inner j loop even though only the final value
 *    matters; the store is now a single contiguous vst1q_s16 issued once
 *    after accumulation (same addresses, same final memory contents);
 *  - k * Row is hoisted out of the j loop (it is loop-invariant).
 *
 * NOTE(review): the A base index is j * T + i but the lane stride is Row —
 * consistent only when Row == T (square matrices); confirm the intended
 * layout with callers before using non-square inputs. */
void mw_neon_mm_mul_s16x8(short * A, int Row, int T, short * B, int Col, short * C)
{
	int i, k, j, lane;

	for (i = 0; i < Row; i += 8) {
		for (k = 0; k < Col; k += 1) {
			int k_Row = k * Row;
			int16x8_t neon_c = vmovq_n_s16(0);

			for (j = 0; j < T; j += 8) {
				int a_idx = j * T + i;

				for (lane = 0; lane < 8; lane++) {
					int16x8_t neon_a = vld1q_s16(A + a_idx);
					int16x8_t neon_b = vdupq_n_s16(B[k_Row + j + lane]);
					neon_c = vaddq_s16(vmulq_s16(neon_a, neon_b), neon_c);
					a_idx += Row;
				}
			}
			/* One contiguous store replaces eight vst1q_lane_s16 calls. */
			vst1q_s16(C + k_Row + i, neon_c);
		}
	}
}
// CHECK-LABEL: define i16 @test_vgetq_lane_s16(<8 x i16> %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7 // CHECK: ret i16 [[VGETQ_LANE]] int16_t test_vgetq_lane_s16(int16x8_t a) { return vgetq_lane_s16(a, 7); }