// CHECK-LABEL: define <1 x double> @test_vmulx_lane_f64_0() #0 {
// CHECK:   [[TMP0:%.*]] = bitcast i64 4599917171378402754 to <1 x double>
// CHECK:   [[TMP1:%.*]] = bitcast i64 4606655882138939123 to <1 x double>
// CHECK:   [[TMP2:%.*]] = bitcast <1 x double> [[TMP0]] to <8 x i8>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x double>
// CHECK:   [[VGET_LANE:%.*]] = extractelement <1 x double> [[TMP3]], i32 0
// CHECK:   [[TMP4:%.*]] = bitcast <1 x double> [[TMP1]] to <8 x i8>
// CHECK:   [[TMP5:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double>
// CHECK:   [[VGET_LANE7:%.*]] = extractelement <1 x double> [[TMP5]], i32 0
// CHECK:   [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGET_LANE7]]) #2
// CHECK:   [[TMP6:%.*]] = bitcast <1 x double> [[TMP0]] to <8 x i8>
// CHECK:   [[TMP7:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x double>
// CHECK:   [[VSET_LANE:%.*]] = insertelement <1 x double> [[TMP7]], double [[VMULXD_F64_I]], i32 0
// CHECK:   ret <1 x double> [[VSET_LANE]]
float64x1_t test_vmulx_lane_f64_0() {
      float64x1_t arg1;
      float64x1_t arg2;
      float64x1_t result;
      float64_t sarg1, sarg2, sres;
      arg1 = vcreate_f64(UINT64_C(0x3fd6304bc43ab5c2));
      arg2 = vcreate_f64(UINT64_C(0x3fee211e215aeef3));
      result = vmulx_lane_f64(arg1, arg2, 0);
      return result;
}
// CHECK-LABEL: test_vmulx_lane_f64_0:
float64x1_t test_vmulx_lane_f64_0() {
      float64x1_t arg1;
      float64x1_t arg2;
      float64x1_t result;
      float64_t sarg1, sarg2, sres;
      arg1 = vcreate_f64(UINT64_C(0x3fd6304bc43ab5c2));
      arg2 = vcreate_f64(UINT64_C(0x3fee211e215aeef3));
      result = vmulx_lane_f64(arg1, arg2, 0);
// CHECK: adrp x[[ADDRLO:[0-9]+]]
// CHECK: ldr d0, [x[[ADDRLO]],
// CHECK: adrp x[[ADDRLO:[0-9]+]]
// CHECK: ldr d1, [x[[ADDRLO]],
// CHECK: fmulx d0, d1, d0
      return result;
}
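// For readers unfamiliar with FMULX, here is a minimal reference sketch (not part
// of the FileCheck test; the helper name is hypothetical) of what vmulx_lane_f64
// with lane index 0 computes on <1 x double> operands. The scalar vmulxd_f64
// intrinsic corresponds to the @llvm.aarch64.neon.fmulx.f64 call matched above;
// FMULX behaves like an ordinary multiply except that (+/-0) * (+/-Inf) yields
// +/-2.0 instead of NaN.
static float64x1_t reference_vmulx_lane0(float64x1_t a, float64x1_t b) {
  // Extract lane 0 of each operand, apply the scalar mulx, and write the product
  // back into lane 0 of a copy of the first operand, mirroring the IR above.
  float64_t prod = vmulxd_f64(vget_lane_f64(a, 0), vget_lane_f64(b, 0));
  return vset_lane_f64(prod, a, 0);
}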
// CHECK-LABEL: define <1 x double> @test_vmulx_lane_f64(<1 x double> %a, <1 x double> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
// CHECK:   [[VGET_LANE:%.*]] = extractelement <1 x double> [[TMP1]], i32 0
// CHECK:   [[TMP2:%.*]] = bitcast <1 x double> %b to <8 x i8>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x double>
// CHECK:   [[VGET_LANE6:%.*]] = extractelement <1 x double> [[TMP3]], i32 0
// CHECK:   [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGET_LANE6]]) #2
// CHECK:   [[TMP4:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK:   [[TMP5:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double>
// CHECK:   [[VSET_LANE:%.*]] = insertelement <1 x double> [[TMP5]], double [[VMULXD_F64_I]], i32 0
// CHECK:   ret <1 x double> [[VSET_LANE]]
float64x1_t test_vmulx_lane_f64(float64x1_t a, float64x1_t b) {
  return vmulx_lane_f64(a, b, 0);
}
// CHECK-LABEL: test_vmulx_lane_f64:
float64x1_t test_vmulx_lane_f64(float64x1_t a, float64x1_t b) {
  return vmulx_lane_f64(a, b, 0);
  // CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+\.d\[0\]|d[0-9]+}}
}
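// A hypothetical usage sketch (not part of the checked test; the function name
// and operand values are illustrative assumptions only): with finite operands
// FMULX matches an ordinary multiply, so the returned vector holds 2.0 in lane 0.
static float64x1_t example_vmulx_lane_usage(void) {
  float64x1_t a = vdup_n_f64(0.5); // lane 0 = 0.5
  float64x1_t b = vdup_n_f64(4.0); // lane 0 = 4.0
  return vmulx_lane_f64(a, b, 0);  // lane 0 = 0.5 * 4.0 = 2.0
}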