/* Compile/execute smoke test for the vget_lane_u8 intrinsic: extract
   lane 1 of a uint8x8_t into a scalar.  The source vector is now
   zero-initialized via vdup_n_u8 — the original read an uninitialized
   automatic vector, which is undefined behavior even when only the
   lowering of the intrinsic is under test.  The (void) cast keeps the
   extracted value "used" without changing observable behavior.  */
void test_vget_laneu8 (void)
{
  uint8_t out_uint8_t;
  uint8x8_t arg0_uint8x8_t = vdup_n_u8 (0);

  out_uint8_t = vget_lane_u8 (arg0_uint8x8_t, 1);
  (void) out_uint8_t;
}
// D135 (down-right, 135-degree diagonal) intra predictor for a 4x4 block.
// Builds the 8-byte reference sequence L,K,J,I,X,A,B,C (left column reversed,
// followed by the top-left corner X and the above row), then computes the
// standard 3-tap rounded average (p[n-1] + 2*p[n] + p[n+1] + 2) >> 2 over it
// and writes the four diagonally shifted 4-byte rows.
void vpx_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) {
  // One 8-byte load picks up X (the byte just before the above row) plus
  // A,B,C,D,... from the above row.
  const uint8x8_t XABCD_u8 = vld1_u8(above - 1);
  const uint8x8_t XABCD = vreinterpret_u64_u8(XABCD_u8);
  const uint64x1_t ____XABC = vshl_n_u64(XABCD, 32);
  const uint32x2_t zero = vdup_n_u32(0);
  // Load the 4 left-column pixels as one 32-bit lane.
  // NOTE(review): casts `left` to const uint32_t* — presumably the caller
  // guarantees 4-byte alignment of the left array; verify against callers.
  const uint32x2_t IJKL = vld1_lane_u32((const uint32_t *)left, zero, 0);
  const uint8x8_t IJKL_u8 = vreinterpret_u8_u32(IJKL);
  // Byte-reverse the left column so it reads L,K,J,I, then merge with the
  // shifted above bytes to form the full reference vector L,K,J,I,X,A,B,C.
  const uint64x1_t LKJI____ = vreinterpret_u64_u8(vrev32_u8(IJKL_u8));
  const uint64x1_t LKJIXABC = vorr_u64(LKJI____, ____XABC);
  // One-byte and two-byte right shifts give the "next" and "next-next"
  // neighbors for the 3-tap filter.
  const uint8x8_t KJIXABC_ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 8));
  const uint8x8_t JIXABC__ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 16));
  // The double shift lost D (lane 4 of X,A,B,C,D); re-insert it at lane 6
  // so the last filtered output still sees its right-hand neighbor.
  const uint8_t D = vget_lane_u8(XABCD_u8, 4);
  const uint8x8_t JIXABCD_ = vset_lane_u8(D, JIXABC__, 6);
  const uint8x8_t LKJIXABC_u8 = vreinterpret_u8_u64(LKJIXABC);
  // (a + c)/2 (truncating), then rounded average with b gives
  // (a + 2b + c + 2) >> 2 — the standard directional-prediction filter.
  const uint8x8_t avg1 = vhadd_u8(JIXABCD_, LKJIXABC_u8);
  const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_);
  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
  // Each output row is a 4-byte window into the filtered vector, sliding
  // one byte per row (row 0 uses the largest shift).
  const uint32x2_t r3 = vreinterpret_u32_u8(avg2);
  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
  const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
  // NOTE(review): dst is stored through uint32_t* lanes — assumes dst rows
  // are 4-byte aligned; confirm against the encoder's buffer allocation.
  vst1_lane_u32((uint32_t *)(dst + 0 * stride), r0, 0);
  vst1_lane_u32((uint32_t *)(dst + 1 * stride), r1, 0);
  vst1_lane_u32((uint32_t *)(dst + 2 * stride), r2, 0);
  vst1_lane_u32((uint32_t *)(dst + 3 * stride), r3, 0);
}
/* Runtime check of vabs_s8 wrap-around semantics: vabs_s8 does not
   saturate, so |INT8_MIN| stays -128, and a subsequent compare-less-
   than-zero must report every lane as true (all bits set).  Exits 0 on
   success, aborts on failure.  */
int main (int argc, char **argv)
{
  /* Every lane should remain -128 after the non-saturating abs.  */
  const int8x8_t abs_vals = vabs_s8 (vdup_n_s8 (-128));
  /* All lanes negative, so each comparison lane is all-ones.  */
  const uint8x8_t negative_mask = vcltz_s8 (abs_vals);

  if (!vget_lane_u8 (negative_mask, 1))
    abort ();
  return 0;
}
// Codegen test: extracting lane 7 of a uint8x8_t must lower to a umov
// from the byte lane of a vector register.  The CHECK lines below are
// FileCheck directives and must not be edited.
uint8_t test_vget_lane_u8(uint8x8_t v1) {
  // CHECK: test_vget_lane_u8
  return vget_lane_u8(v1, 7);
  // CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
}
// Codegen test: vget_lane_u8 on the last lane should compile to exactly
// one umov.b plus ret.  The CHECK lines are FileCheck directives and
// must not be edited.
uint8_t test_vget_lane_u8(uint8x8_t a) {
  // CHECK-LABEL: test_vget_lane_u8:
  // CHECK-NEXT: umov.b w0, v0[7]
  // CHECK-NEXT: ret
  return vget_lane_u8(a, 7);
}
// NEON stereo-to-stereo equal-power panner.  For each sample, depending on
// the per-sample aIsOnTheLeft flag, either the right input is folded into
// the left channel (panning left) or the left input is folded into the
// right channel (panning right), scaled by the per-sample gain arrays:
//   on the left:  outL = inL + inR * gainL;  outR = inR * gainR
//   otherwise:    outL = inL * gainL;        outR = inR + inL * gainR
// All arrays are WEBAUDIO_BLOCK_SIZE floats (bools for aIsOnTheLeft) and
// must satisfy ASSERT_ALIGNED; the block size must be a multiple of 8.
void AudioBlockPanStereoToStereo_NEON(
    const float aInputL[WEBAUDIO_BLOCK_SIZE],
    const float aInputR[WEBAUDIO_BLOCK_SIZE],
    float aGainL[WEBAUDIO_BLOCK_SIZE], float aGainR[WEBAUDIO_BLOCK_SIZE],
    const bool aIsOnTheLeft[WEBAUDIO_BLOCK_SIZE],
    float aOutputL[WEBAUDIO_BLOCK_SIZE],
    float aOutputR[WEBAUDIO_BLOCK_SIZE]) {
  ASSERT_ALIGNED(aInputL);
  ASSERT_ALIGNED(aInputR);
  ASSERT_ALIGNED(aGainL);
  ASSERT_ALIGNED(aGainR);
  ASSERT_ALIGNED(aIsOnTheLeft);
  ASSERT_ALIGNED(aOutputL);
  ASSERT_ALIGNED(aOutputR);

  float32x4_t vinL0, vinL1;
  float32x4_t vinR0, vinR1;
  float32x4_t voutL0, voutL1;
  float32x4_t voutR0, voutR1;
  float32x4_t vscaleL0, vscaleL1;
  float32x4_t vscaleR0, vscaleR1;
  float32x4_t onleft0, onleft1, notonleft0, notonleft1;

  float32x4_t zero = vmovq_n_f32(0);
  uint8x8_t isOnTheLeft;

  // Although MSVC throws uninitialized value warning for voutL0 and voutL1,
  // since we fill all lanes by vsetq_lane_f32, we can ignore it. But to avoid
  // compiler warning, set zero.
  voutL0 = zero;
  voutL1 = zero;

  for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; i += 8) {
    vinL0 = vld1q_f32(ADDRESS_OF(aInputL, i));
    vinL1 = vld1q_f32(ADDRESS_OF(aInputL, i + 4));

    vinR0 = vld1q_f32(ADDRESS_OF(aInputR, i));
    vinR1 = vld1q_f32(ADDRESS_OF(aInputR, i + 4));

    vscaleL0 = vld1q_f32(ADDRESS_OF(aGainL, i));
    vscaleL1 = vld1q_f32(ADDRESS_OF(aGainL, i + 4));

    vscaleR0 = vld1q_f32(ADDRESS_OF(aGainR, i));
    vscaleR1 = vld1q_f32(ADDRESS_OF(aGainR, i + 4));

    // Load output with boolean "on the left" values. This assumes that
    // bools are stored as a single byte.
    isOnTheLeft = vld1_u8((uint8_t*)&aIsOnTheLeft[i]);
    voutL0 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 0), voutL0, 0);
    voutL0 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 1), voutL0, 1);
    voutL0 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 2), voutL0, 2);
    voutL0 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 3), voutL0, 3);
    voutL1 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 4), voutL1, 0);
    voutL1 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 5), voutL1, 1);
    voutL1 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 6), voutL1, 2);
    voutL1 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 7), voutL1, 3);

    // Convert the boolean values into masks by setting all bits to 1
    // if true.
    voutL0 = (float32x4_t)vcgtq_f32(voutL0, zero);
    voutL1 = (float32x4_t)vcgtq_f32(voutL1, zero);

    // The right output masks are the same as the left masks
    voutR0 = voutL0;
    voutR1 = voutL1;

    // Calculate left channel assuming isOnTheLeft
    onleft0 = vmlaq_f32(vinL0, vinR0, vscaleL0);
    // BUGFIX: the second half must use vscaleL1; it previously reused
    // vscaleL0, applying the wrong gains to samples i+4..i+7.
    onleft1 = vmlaq_f32(vinL1, vinR1, vscaleL1);

    // Calculate left channel assuming not isOnTheLeft
    notonleft0 = vmulq_f32(vinL0, vscaleL0);
    notonleft1 = vmulq_f32(vinL1, vscaleL1);

    // Write results using previously stored masks
    voutL0 = vbslq_f32((uint32x4_t)voutL0, onleft0, notonleft0);
    voutL1 = vbslq_f32((uint32x4_t)voutL1, onleft1, notonleft1);

    // Calculate right channel assuming isOnTheLeft
    onleft0 = vmulq_f32(vinR0, vscaleR0);
    onleft1 = vmulq_f32(vinR1, vscaleR1);

    // Calculate right channel assuming not isOnTheLeft
    notonleft0 = vmlaq_f32(vinR0, vinL0, vscaleR0);
    notonleft1 = vmlaq_f32(vinR1, vinL1, vscaleR1);

    // Write results using previously stored masks
    voutR0 = vbslq_f32((uint32x4_t)voutR0, onleft0, notonleft0);
    voutR1 = vbslq_f32((uint32x4_t)voutR1, onleft1, notonleft1);

    vst1q_f32(ADDRESS_OF(aOutputL, i), voutL0);
    vst1q_f32(ADDRESS_OF(aOutputL, i + 4), voutL1);
    vst1q_f32(ADDRESS_OF(aOutputR, i), voutR0);
    vst1q_f32(ADDRESS_OF(aOutputR, i + 4), voutR1);
  }
}
// CHECK-LABEL: define i8 @test_vget_lane_u8(<8 x i8> %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7 // CHECK: ret i8 [[VGET_LANE]] uint8_t test_vget_lane_u8(uint8x8_t a) { return vget_lane_u8(a, 7); }
// Negative diagnostic test: the lane argument -1 is intentionally out of
// range so the compiler must reject it with the error matched by the
// dg-error directive below.  Do not "fix" the lane index.
uint8_t test_vget_lane_u8_before (uint8x8_t in)
{
  /* { dg-error "lane -1 out of range 0 - 7" "" {target *-*-*} 0 } */
  return vget_lane_u8 (in, -1);
}
// Negative diagnostic test: the lane argument 8 is intentionally one past
// the last valid lane so the compiler must reject it with the error
// matched by the dg-error directive below.  Do not "fix" the lane index.
uint8_t test_vget_lane_u8_beyond (uint8x8_t in)
{
  /* { dg-error "lane 8 out of range 0 - 7" "" {target *-*-*} 0 } */
  return vget_lane_u8 (in, 8);
}