// CHECK-LABEL: test_vset_vget_lane_f16 int test_vset_vget_lane_f16(float16x4_t a) { float16x4_t b; b = vset_lane_f16(3.5, a, 3); float16_t c = vget_lane_f16(b, 3); return (int)c; // CHECK: movz x{{[0-9]+}}, #3 }
// FileCheck test (assembly level): extracting lane 1 of a v4f16 and implicitly
// widening the half to float should lower to the exact umov/fmov/fcvt sequence
// pinned by the CHECK-NEXT lines below.
float32_t test_vget_lane_f16(float16x4_t a) {
// CHECK-LABEL: test_vget_lane_f16:
// CHECK-NEXT: umov.h w8, v0[1]
// CHECK-NEXT: fmov s0, w8
// CHECK-NEXT: fcvt s0, h0
// CHECK-NEXT: ret
  // half result converts to the float32_t return type via fcvt.
  return vget_lane_f16(a, 1);
}
// CHECK: test_vset_lane_f16_2 float16x4_t test_vset_lane_f16_2(float16x4_t v1) { float16_t a = vget_lane_f16(v1, 0); return vset_lane_f16(a, v1, 3); // CHECK: ins {{v[0-9]+}}.h[3], {{v[0-9]+}}.h[0] }
// CHECK: test_vget_lane_f16_2 float test_vget_lane_f16_2(float16x4_t v1) { float16_t a = vget_lane_f16(v1, 3); return (float)a; // CHECK: dup {{h[0-9]+}}, {{v[0-9]+}}.h[3] }
// CHECK-LABEL: define float @test_vget_lane_f16(<4 x half> %a) #0 { // CHECK: [[__REINT_242:%.*]] = alloca <4 x half>, align 8 // CHECK: [[__REINT1_242:%.*]] = alloca i16, align 2 // CHECK: store <4 x half> %a, <4 x half>* [[__REINT_242]], align 8 // CHECK: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_242]] to <4 x i16>* // CHECK: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8 // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP3]], i32 1 // CHECK: store i16 [[VGET_LANE]], i16* [[__REINT1_242]], align 2 // CHECK: [[TMP4:%.*]] = bitcast i16* [[__REINT1_242]] to half* // CHECK: [[TMP5:%.*]] = load half, half* [[TMP4]], align 2 // CHECK: [[CONV:%.*]] = fpext half [[TMP5]] to float // CHECK: ret float [[CONV]] float32_t test_vget_lane_f16(float16x4_t a) { return vget_lane_f16(a, 1); }