/* Verify vdupq_lane_s64: duplicating the selected lane of a 64-bit
   scalar vector must replicate that value into both lanes of the
   128-bit result.  Exercises the lane-0 and lane-1 wrappers in turn.
   Returns 0 on success, 1 on any lane mismatch.
   Fixed: added the missing return type (implicit int is invalid since
   C99) and a proper (void) parameter list.  */
int
test_vdupq_lane_s64 (void)
{
  int64x1_t a;
  int64x2_t b;
  int i;
  int64_t c[1];
  int64_t d[2];

  c[0] = 0;
  a = vld1_s64 (c);
  b = wrap_vdupq_lane_s64_0 (a);
  vst1q_s64 (d, b);
  for (i = 0; i < 2; i++)
    if (c[0] != d[i])
      return 1;

  c[0] = 1;
  a = vld1_s64 (c);
  b = wrap_vdupq_lane_s64_1 (a);
  vst1q_s64 (d, b);
  for (i = 0; i < 2; i++)
    if (c[0] != d[i])
      return 1;

  return 0;
}
/* Accumulate four consecutive correlation terms:
   corr_QC[ offset + k ] += ( state[ k ] * input_QS[ offset + k ] ) >> ( 2 * QS - QC )
   for k = 0..3, using one widening multiply + shift-right-accumulate per
   64-bit lane pair.  corr_QC is updated in place; no other state is touched.  */
static OPUS_INLINE void calc_corr( const opus_int32 *const input_QS, opus_int64 *const corr_QC, const opus_int offset, const int32x4_t state_QS_s32x4 )
{
    /* Load the four input samples and the four running 64-bit accumulators. */
    const int32x4_t in_s32x4 = vld1q_s32( input_QS + offset );
    int64x2_t       acc_lo   = vld1q_s64( corr_QC + offset + 0 );
    int64x2_t       acc_hi   = vld1q_s64( corr_QC + offset + 2 );

    /* Widening 32x32 -> 64 products, split into low and high lane pairs. */
    const int64x2_t prod_lo = vmull_s32( vget_low_s32 ( state_QS_s32x4 ), vget_low_s32 ( in_s32x4 ) );
    const int64x2_t prod_hi = vmull_s32( vget_high_s32( state_QS_s32x4 ), vget_high_s32( in_s32x4 ) );

    /* Rescale each product from Q(2*QS) down to QC while accumulating. */
    acc_lo = vsraq_n_s64( acc_lo, prod_lo, 2 * QS - QC );
    acc_hi = vsraq_n_s64( acc_hi, prod_hi, 2 * QS - QC );

    vst1q_s64( corr_QC + offset + 0, acc_lo );
    vst1q_s64( corr_QC + offset + 2, acc_hi );
}
/* Compile-only probe for the vst1q_s64 intrinsic: verifies the compiler
   accepts an (int64_t *, int64x2_t) argument pair and emits the store.
   The arguments are deliberately left uninitialized -- the routine is
   meant to be compiled, not executed (standard intrinsic-testsuite
   pattern).  NOTE(review): actually calling this would be undefined
   behavior (store through an uninitialized pointer); confirm it is only
   built, never run.  */
void test_vst1Qs64 (void) { int64_t *arg0_int64_t; int64x2_t arg1_int64x2_t; vst1q_s64 (arg0_int64_t, arg1_int64x2_t); }
/* Verify vld1q_dup_s64: loading a single int64_t and duplicating it must
   fill both 64-bit lanes of the 128-bit result with the same value.
   Fixed: the scalar buffers were declared as int64x1_t (vector) arrays and
   the literals were cast to int64x1_t, but vld1q_dup_s64 and vst1q_s64
   operate on plain int64_t storage, and comparing vector types with !=
   does not yield a scalar condition.  Use int64_t throughout.  */
int
main (void)
{
  /* Second element is present only to prove the duplicate comes from
     input[0]; 0x89ab... exceeds INT64_MAX, hence the explicit cast.  */
  int64_t input[2] = { 0x0123456776543210LL, (int64_t) 0x89abcdeffedcba90LL };
  int64_t output[2] = { 0, 0 };
  int64x2_t var = vld1q_dup_s64 (input);

  vst1q_s64 (output, var);
  if (output[0] != 0x0123456776543210LL)
    abort ();
  if (output[1] != 0x0123456776543210LL)
    abort ();
  return 0;
}
/* Verify vreinterpretq_s64_f64: bit-reinterpreting a float64x2_t holding
   { PI_F64, E_F64 } must yield the corresponding IEEE-754 double bit
   patterns in the int64 lanes.  Returns 0 on success, 1 on mismatch.
   Fixed: added the missing return type (implicit int is invalid since
   C99) and a (void) parameter list, added LL suffixes to the 64-bit
   literals, and removed the stray semicolon after the function body.  */
int
test_vreinterpretq_s64_f64 (void)
{
  float64x2_t a;
  int64x2_t b;
  float64_t c[2] = { PI_F64, E_F64 };
  /* Expected bit patterns of pi and e as IEEE-754 doubles.  */
  int64_t d[2] = { 0x400921FB54442D18LL, 0x4005BF0A8B145769LL };
  int64_t e[2];
  int i;

  a = vld1q_f64 (c);
  b = wrap_vreinterpretq_s64_f64 (a);
  vst1q_s64 (e, b);
  for (i = 0; i < 2; i++)
    if (d[i] != e[i])
      return 1;
  return 0;
}