// Butterfly for the high-bitdepth inverse ADST: per 32-bit element,
//   s0 = in0 * c[1] + in1 * c[0]
//   s1 = in0 * c[0] - in1 * c[1]
// with the products widened to 64 bits (low half in s*[0], high half in s*[1]).
static INLINE void iadst_butterfly_lane_1_0_bd12_neon(const int32x4_t in0,
                                                      const int32x4_t in1,
                                                      const int32x2_t c,
                                                      int64x2_t *const s0,
                                                      int64x2_t *const s1) {
  const int64x2_t t0_lo = vmull_lane_s32(vget_low_s32(in0), c, 1);
  const int64x2_t t1_lo = vmull_lane_s32(vget_low_s32(in0), c, 0);
  const int64x2_t t0_hi = vmull_lane_s32(vget_high_s32(in0), c, 1);
  const int64x2_t t1_hi = vmull_lane_s32(vget_high_s32(in0), c, 0);

  s0[0] = vmlal_lane_s32(t0_lo, vget_low_s32(in1), c, 0);
  s1[0] = vmlsl_lane_s32(t1_lo, vget_low_s32(in1), c, 1);
  s0[1] = vmlal_lane_s32(t0_hi, vget_high_s32(in1), c, 0);
  s1[1] = vmlsl_lane_s32(t1_hi, vget_high_s32(in1), c, 1);
}
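/* A minimal scalar sketch of the same butterfly, useful as a reference when
 * checking the NEON version. The function name and the separate c0/c1
 * parameters are illustrative assumptions, not part of the original code;
 * they stand in for the two lanes of the int32x2_t constant vector c. */
#include <stdint.h>

static void iadst_butterfly_lane_1_0_scalar(const int32_t in0[4],
                                            const int32_t in1[4],
                                            int32_t c0, int32_t c1,
                                            int64_t s0[4], int64_t s1[4]) {
  for (int i = 0; i < 4; i++) {
    // Widen to 64 bits before accumulating, mirroring vmull/vmlal/vmlsl.
    s0[i] = (int64_t)in0[i] * c1 + (int64_t)in1[i] * c0;
    s1[i] = (int64_t)in0[i] * c0 - (int64_t)in1[i] * c1;
  }
}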
void test_vmlsl_lanes32 (void)
{
  int64x2_t out_int64x2_t;
  int64x2_t arg0_int64x2_t;
  int32x2_t arg1_int32x2_t;
  int32x2_t arg2_int32x2_t;

  out_int64x2_t = vmlsl_lane_s32 (arg0_int64x2_t, arg1_int32x2_t, arg2_int32x2_t, 1);
}
int64x2_t test_vmlsl_lane_s32(int64x2_t a, int32x2_t b, int32x2_t v) {
  // CHECK: test_vmlsl_lane_s32
  return vmlsl_lane_s32(a, b, v, 1);
  // CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
}
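/* Hypothetical standalone usage sketch, not taken from the tests above: it
 * shows the semantics the two tests exercise. vmlsl_lane_s32 widens each
 * 32-bit lane of b to 64 bits, multiplies it by the selected lane of v, and
 * subtracts the product from the matching 64-bit lane of the accumulator,
 * i.e. r[i] = a[i] - (int64_t)b[i] * v[lane]. The data values are made up. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int64_t a_data[2] = {100, 200};
  const int32_t b_data[2] = {3, 4};
  const int32_t v_data[2] = {10, 7};  // lane 1 holds 7

  const int64x2_t a = vld1q_s64(a_data);
  const int32x2_t b = vld1_s32(b_data);
  const int32x2_t v = vld1_s32(v_data);

  // r[i] = a[i] - (int64_t)b[i] * v[1]  ->  {100 - 3*7, 200 - 4*7} = {79, 172}
  const int64x2_t r = vmlsl_lane_s32(a, b, v, 1);

  printf("%lld %lld\n", (long long)vgetq_lane_s64(r, 0),
         (long long)vgetq_lane_s64(r, 1));
  return 0;
}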