/* Compile-time exerciser for the vshrn_n_s64 intrinsic: narrows an
   int64x2_t vector down to int32x2_t with an immediate right shift of 1.
   The input vector is deliberately left uninitialized -- this testcase
   only checks that the intrinsic compiles and maps onto the expected
   narrowing-shift instruction, not any numeric result.  */
void test_vshrn_ns64 (void)
{
  int64x2_t src_q;
  int32x2_t dst_d;

  dst_d = vshrn_n_s64 (src_q, 1);
}
void mdrc5b_apply_limiter(MDRC5B_LOCAL_STRUCT_T *HeapPtr) { unsigned int LaIdx; unsigned int NumMainCh; unsigned int Samples; unsigned int ch, k, n; MMlong *Ptr; MMlong *Ptr2; MMlong *MemOutPtr; MMshort PeakdB; MMlong PeakMax; int RmsMeasure; MMshort LimiterAtCoef; MMshort LimiterReCoef; MMshort LimiterGainMant[MDRC5B_BLOCK_SIZE + 1]; MMshort LimiterGainExp; MMshort LimiterTargetGaindB; unsigned int LimiterHoldRem; unsigned int LimiterHtSamp; MMshort Exp, TargetGain; MMshort MaxShiftBits; unsigned int lookahead_len = (unsigned int) HeapPtr->LimiterLALen; unsigned int cpt1, cpt2; uint32x2x2_t Temp_u32x2x2; uint32x2_t Ldbits_u32x2, Ldbits2_u32x2; uint32x2_t bsl_u32x2; int32x2_t LimGainMant_32x2; int64x2_t TempX_64x2, MemOut_64x2; int64x2_t Tmp_64x2; int64x2_t LimiterGainExp_64x2, sample_64x2; int64x1_t TempX_64x1, sample_64x1; int32_t *LimiterGainMant_ptr; int32x2_t Tmp_32x2, Ldbits_32x2, n_32x2; int32x2_t TempX_low_32x2, TempX_high_32x2; int32x2x2_t Tmp_32x2x2; int64x1_t Peak_64x1, PeakMax_64x1, Tmp_64x1, diffX_64x1; int64x1_t Peak_scale_pow_64x1, Peak_scale_64x1, Zero_s64x1; int64x1_t MaxShiftBits_neg_64x1, MaxShiftBits_hd_64x1; int64x2_t diffX_64x2; uint64x1_t bsl_u64x1; int32x2_t LimiterPeakCoef_32x2, diffX_low_32x2, diffX_high_32x2; int32x2_t TargetGain_32x2; uint32x2x2_t Peak_u32x2x2; uint32x2_t Peak_exp_u32x2, Peak_exp2_u32x2, Peak_mant_u32x2; int32x2_t x_32x2, xn_32x2, PeakdB_32x2, Peak_exp_32x2; int32x2_t LimiterTargetGaindB_32x2, Exp_32x2, LimiterCoef_32x2; int32x4_t Tmp_32x4; START_PMU_MEASURE(PMU_MEASURE_MRDC5B_APPLY_LIMITER) START_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT) Samples = (unsigned int) HeapPtr->BlockSize; NumMainCh = (unsigned int) HeapPtr->NumMainCh; TempX_64x2 = vdupq_n_s64(0); for(ch = 0; ch < NumMainCh; ch++) { Ptr = HeapPtr->MainInBuf[ch]; // compute the number of bits needs to be shifted to avoid overflow for(k = (Samples >> 1); k > 0; k--) { sample_64x2 = vld1q_s64(Ptr); Ptr +=2; sample_64x2 = veorq_s64(sample_64x2, 
vshrq_n_s64(sample_64x2, 63)); TempX_64x2 = vorrq_s64(TempX_64x2, sample_64x2); } if(Samples & 1) { sample_64x1 = vld1_s64(Ptr); sample_64x1 = veor_s64(sample_64x1, vshr_n_s64(sample_64x1, 63)); TempX_64x2 = vorrq_s64(TempX_64x2, vcombine_s64(sample_64x1, sample_64x1)); } } TempX_64x1 = vorr_s64(vget_low_s64(TempX_64x2), vget_high_s64(TempX_64x2)); Temp_u32x2x2 = vuzp_u32(vreinterpret_u32_s64(TempX_64x1), vreinterpret_u32_s64(TempX_64x1)); bsl_u32x2 = vceq_u32(Temp_u32x2x2.val[1], vdup_n_u32(0)); // MSB == 0 ? // use clz instead of cls because we are sure that input value is positive // and because cls(LSB) could be wrong (if MSB is equal to 0 and bit 31 of LSL is 1) // thus clz result will be 1 more than cls result (that's why you may see (Ldbits - 1) // instead of Ldbits below) Ldbits_u32x2 = vadd_u32(vclz_u32(Temp_u32x2x2.val[0]), vdup_n_u32(32)); // clz(LSB)+32 Ldbits2_u32x2 = vclz_u32(Temp_u32x2x2.val[1]); // clz(MSB) Ldbits_u32x2 = vbsl_u32(bsl_u32x2, Ldbits_u32x2, Ldbits2_u32x2); // MSB == 0 ? clz(LSB)+32 : clz(MSB) bsl_u32x2 = vceq_u32(Ldbits_u32x2, vdup_n_u32(64)); // Ldbits == 64 ? (i.e. TempX == 0 ?) // the aim of MaxShiftBits is that sample will be shifted so that it occupies // 24 significant bits for 24 bits samples or 32 significant bits for 32 bits samples // but we are in 64 bits architecture on CA9/NEON // so we must right shift of ((64 - 24) - (Ldbits - 1)) bits for 24 bits samples // or of ((64 - 32) - (Ldbits - 1)) bits for 32 bits samples // and we add 1 because it was done this way on MMDSP (I don't know why !) 
#ifdef SAMPLES_24_BITS // MaxShiftBits = ((64 - 24) - (Ldbits - 1)) + 1 // = 42 - Ldbits Ldbits_32x2 = vsub_s32(vdup_n_s32(42), vreinterpret_s32_u32(Ldbits_u32x2)); #else // SAMPLES_24_BITS // MaxShiftBits = ((64 - 32) - (Ldbits - 1)) + 1 // = 34 - Ldbits Ldbits_32x2 = vsub_s32(vdup_n_s32(34), vreinterpret_s32_u32(Ldbits_u32x2)); #endif // SAMPLES_24_BITS Ldbits_32x2 = vmax_s32(vdup_n_s32(1), Ldbits_32x2); Ldbits_32x2 = vbsl_s32(bsl_u32x2, vdup_n_s32(1), Ldbits_32x2); // if(TempX == 0) Ldbits = 1 MaxShiftBits = vget_lane_s32(Ldbits_32x2, 0); STOP_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT) #ifdef DEBUG_LIMITER_OUTPUT if((debug_cpt_samples >= DEBUG_CPT_MIN) && (debug_cpt_samples <= DEBUG_CPT_MAX)) { char string[100]; debug_write_string("MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT\n"); sprintf(string, "MaxShiftBits=%d\n", MaxShiftBits); debug_write_string(string); } #endif // DEBUG_LIMITER_OUTPUT START_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_INSERT_NEW_SUBBAND) // insert the new subband samples into the lookahead buffers RmsMeasure = HeapPtr->Limiter.RmsMeasure; LaIdx = (unsigned int) HeapPtr->LimiterLaIdx; if(LaIdx + Samples >= lookahead_len) { cpt1 = lookahead_len - LaIdx; cpt2 = Samples - cpt1; // update index HeapPtr->LimiterLaIdx = (int) cpt2; } else { cpt1 = Samples; cpt2 = 0; // update index HeapPtr->LimiterLaIdx = (int) (LaIdx + Samples); } LimiterPeakCoef_32x2 = vdup_n_s32(HeapPtr->LimiterPeakAtCoef); // LimiterPeakAtCoef, LimiterPeakAtCoef LimiterPeakCoef_32x2 = vset_lane_s32(HeapPtr->LimiterPeakReCoef, LimiterPeakCoef_32x2, 1); // LimiterPeakAtCoef, LimiterPeakReCoef Peak_scale_64x1 = vdup_n_s64(HeapPtr->PrevShiftBits - MaxShiftBits); Peak_scale_pow_64x1 = vshl_n_s64(Peak_scale_64x1, 1); MaxShiftBits_neg_64x1 = vdup_n_s64(-MaxShiftBits); #ifdef SAMPLES_24_BITS MaxShiftBits_hd_64x1 = vdup_n_s64(24 - MaxShiftBits); #else // SAMPLES_24_BITS MaxShiftBits_hd_64x1 = vdup_n_s64(32 - MaxShiftBits); #endif // SAMPLES_24_BITS PeakMax_64x1 = 
vdup_n_s64(0); for(ch = 0; ch < NumMainCh; ch++) { Ptr = HeapPtr->MainInBuf[ch]; Ptr2 = HeapPtr->LimiterLABuf[ch] + LaIdx; // go to the first valid sample Peak_64x1 = vdup_n_s64(HeapPtr->LimiterPeak[ch]); if(RmsMeasure) { // compensate Peak according to the previous shift bits Peak_64x1 = vqrshl_s64(Peak_64x1, Peak_scale_pow_64x1); // neg value => shift right rounding // rms measure for(k = cpt1; k > 0; k--) { Tmp_64x1 = vld1_s64(Ptr); Ptr++; vst1_s64(Ptr2, Tmp_64x1); Ptr2++; Tmp_64x1 = vqrshl_s64(Tmp_64x1, MaxShiftBits_neg_64x1); Tmp_64x2 = vcombine_s64(Tmp_64x1, Tmp_64x1); Tmp_32x2x2 = vuzp_s32(vget_low_s32(vreinterpretq_s32_s64(Tmp_64x2)), vget_high_s32(vreinterpretq_s32_s64(Tmp_64x2))); Tmp_32x2 = Tmp_32x2x2.val[0]; // LSB of Tmp_64x2 (MSB is dummy) TempX_64x2 = vqdmull_s32(Tmp_32x2, Tmp_32x2); TempX_64x1 = vget_low_s64(TempX_64x2); diffX_64x1 = vqsub_s64(Peak_64x1, TempX_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63)); // sign(diffX) diffX_64x2 = vcombine_s64(diffX_64x1, diffX_64x1); diffX_low_32x2 = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32); // wextract_l(diffX), wextract_l(diffX) diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32); // wround_L(diffX), wround_L(diffX) Tmp_64x2 = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2)); // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef) Tmp_64x2 = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2); // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef)) Tmp_64x2 = vqaddq_s64(TempX_64x2, Tmp_64x2); Peak_64x1 = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2)); Tmp_64x1 = vqsub_s64(Peak_64x1, PeakMax_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Peak_64x1 - PeakMax_64x1) PeakMax_64x1 = vbsl_s64(bsl_u64x1, 
PeakMax_64x1, Peak_64x1); } Ptr2 = HeapPtr->LimiterLABuf[ch]; for(k = cpt2; k > 0; k--) { Tmp_64x1 = vld1_s64(Ptr); Ptr++; vst1_s64(Ptr2, Tmp_64x1); Ptr2++; Tmp_64x1 = vqrshl_s64(Tmp_64x1, MaxShiftBits_neg_64x1); Tmp_64x2 = vcombine_s64(Tmp_64x1, Tmp_64x1); Tmp_32x2x2 = vuzp_s32(vget_low_s32(vreinterpretq_s32_s64(Tmp_64x2)), vget_high_s32(vreinterpretq_s32_s64(Tmp_64x2))); Tmp_32x2 = Tmp_32x2x2.val[0]; // LSB of Tmp_64x2 (MSB is dummy) TempX_64x2 = vqdmull_s32(Tmp_32x2, Tmp_32x2); TempX_64x1 = vget_low_s64(TempX_64x2); diffX_64x1 = vqsub_s64(Peak_64x1, TempX_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63)); // sign(diffX) diffX_64x2 = vcombine_s64(diffX_64x1, diffX_64x1); diffX_low_32x2 = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32); // wextract_l(diffX), wextract_l(diffX) diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32); // wround_L(diffX), wround_L(diffX) Tmp_64x2 = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2)); // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef) Tmp_64x2 = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2); // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef)) Tmp_64x2 = vqaddq_s64(TempX_64x2, Tmp_64x2); Peak_64x1 = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2)); Tmp_64x1 = vqsub_s64(Peak_64x1, PeakMax_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Peak_64x1 - PeakMax_64x1) PeakMax_64x1 = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1); } } else { // compensate Peak according to the previous shift bits Peak_64x1 = vqrshl_s64(Peak_64x1, Peak_scale_64x1); // amplitude measure Zero_s64x1 = vdup_n_s64(0); for(k = cpt1; k > 0; k--) { Tmp_64x1 = vld1_s64(Ptr); Ptr++; vst1_s64(Ptr2, Tmp_64x1); Ptr2++; bsl_u64x1 = 
vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Tmp_64x1) TempX_64x1 = vqsub_s64(Zero_s64x1, Tmp_64x1); // -Tmp_64x1 TempX_64x1 = vbsl_s64(bsl_u64x1, TempX_64x1, Tmp_64x1); TempX_64x1 = vqrshl_s64(TempX_64x1, MaxShiftBits_hd_64x1); TempX_64x2 = vcombine_s64(TempX_64x1, TempX_64x1); diffX_64x1 = vqsub_s64(Peak_64x1, TempX_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63)); // sign(diffX) diffX_64x2 = vcombine_s64(diffX_64x1, diffX_64x1); diffX_low_32x2 = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32); // wextract_l(diffX), wextract_l(diffX) diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32); // wround_L(diffX), wround_L(diffX) Tmp_64x2 = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2)); // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef) Tmp_64x2 = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2); // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef)) Tmp_64x2 = vqaddq_s64(TempX_64x2, Tmp_64x2); Peak_64x1 = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2)); Tmp_64x1 = vqsub_s64(Peak_64x1, PeakMax_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Peak_64x1 - PeakMax_64x1) PeakMax_64x1 = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1); } Ptr2 = HeapPtr->LimiterLABuf[ch]; for(k = cpt2; k > 0; k--) { Tmp_64x1 = vld1_s64(Ptr); Ptr++; vst1_s64(Ptr2, Tmp_64x1); Ptr2++; bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Tmp_64x1) TempX_64x1 = vqsub_s64(Zero_s64x1, Tmp_64x1); // -Tmp_64x1 TempX_64x1 = vbsl_s64(bsl_u64x1, TempX_64x1, Tmp_64x1); TempX_64x1 = vqrshl_s64(TempX_64x1, MaxShiftBits_hd_64x1); TempX_64x2 = vcombine_s64(TempX_64x1, TempX_64x1); diffX_64x1 = vqsub_s64(Peak_64x1, TempX_64x1); bsl_u64x1 = 
vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63)); // sign(diffX) diffX_64x2 = vcombine_s64(diffX_64x1, diffX_64x1); diffX_low_32x2 = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32); // wextract_l(diffX), wextract_l(diffX) diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32); // wround_L(diffX), wround_L(diffX) Tmp_64x2 = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2)); // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef) Tmp_64x2 = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2); // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef)) Tmp_64x2 = vqaddq_s64(TempX_64x2, Tmp_64x2); Peak_64x1 = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2)); Tmp_64x1 = vqsub_s64(Peak_64x1, PeakMax_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Peak_64x1 - PeakMax_64x1) PeakMax_64x1 = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1); } } HeapPtr->LimiterPeak[ch] = vget_lane_s64(Peak_64x1, 0); // save history } // for(ch = 0...) 
PeakMax = vget_lane_s64(PeakMax_64x1, 0); HeapPtr->PrevShiftBits = MaxShiftBits; STOP_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_INSERT_NEW_SUBBAND) if(PeakMax < MDRC5B_ALMOST_ZERO_THRESH) { PeakdB = (MDRC5B_POWER_DB_MINUS_INF << 16); // 8.16, [-128.0, 127.0] dB } else { Peak_u32x2x2 = vuzp_u32(vreinterpret_u32_s64(PeakMax_64x1), vreinterpret_u32_s64(PeakMax_64x1)); bsl_u32x2 = vceq_u32(Peak_u32x2x2.val[1], vdup_n_u32(0)); Peak_exp_u32x2 = vadd_u32(vclz_u32(Peak_u32x2x2.val[0]), vdup_n_u32(32)); Peak_exp2_u32x2 = vclz_u32(Peak_u32x2x2.val[1]); Peak_exp_u32x2 = vbsl_u32(bsl_u32x2, Peak_exp_u32x2, Peak_exp2_u32x2); Peak_mant_u32x2 = vrshrn_n_u64(vshlq_u64(vreinterpretq_u64_s64(vcombine_s64(PeakMax_64x1, PeakMax_64x1)), vreinterpretq_s64_u64(vmovl_u32(Peak_exp_u32x2))), 32); // if(Peak_mant >= sqrt(0.5)) // { // Peak_exp--; // Peak_mant >>= 1; // } bsl_u32x2 = vcge_u32(Peak_mant_u32x2, vdup_n_u32(0xB504F334)); Peak_exp_u32x2 = vbsl_u32(bsl_u32x2, vsub_u32(Peak_exp_u32x2, vdup_n_u32(1)), Peak_exp_u32x2); Peak_mant_u32x2 = vbsl_u32(bsl_u32x2, vrshr_n_u32(Peak_mant_u32x2, 1), Peak_mant_u32x2); Peak_exp_32x2 = vreinterpret_s32_u32(Peak_exp_u32x2); #ifdef SAMPLES_24_BITS // correction of 16 bits if input samples are 24 bits Peak_exp_32x2 = vsub_s32(Peak_exp_32x2, vdup_n_s32(16)); #endif // SAMPLES_24_BITS // at this point : sqrt(0.5)/2 <= Peak_mant < sqrt(0.5) // // ln(1+x) = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 - x^10/10 ... accuracy OK if |x| < 0.5 // sqrt(0.5)/2 <= Peak_mant < sqrt(0.5) => sqrt(0.5)-1 <= 2*Peak_mant-1 < 2*sqrt(0.5)-1 // => ln(Peak_mant) = ln(1+x)-ln(2) with x=2*Peak_mant-1, i.e. |x| < 0.414214... 
// x=2*PeakMax_mant-1 in Q31 // => sqrt(0.5)-1 <= x < 2*sqrt(0.5)-1 x_32x2 = vreinterpret_s32_u32(vsub_u32(Peak_mant_u32x2, vdup_n_u32(0x80000000))); PeakdB_32x2 = x_32x2; // PeakdB = x xn_32x2 = vqrdmulh_s32(x_32x2, x_32x2); // xn = x^2 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 1)); // PeakdB = x - x^2/2 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^3 PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x2AAAAAAB))); // PeakdB = x - x^2/2 + x^3/3 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^4 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 2)); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^5 PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x1999999A))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^6 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x15555555))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^7 PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x12492492))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^8 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 3)); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^9 PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x0E38E38E))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^10 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x0CCCCCCD))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 - x^10/10 // at this point : PeakMaxdB contains ln(1+x) in Q31 if(RmsMeasure) { // dB(power) = 10*log10(power) // PeakMaxdB = 
10*log10(PeakMax)+20*log10(2)*(HEADROOM+MaxShiftBits) // = 10*ln(PeakMax)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 10/ln(10)*ln(PeakMax_mant*2^(-PeakMax_exp))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 10/ln(10)*(ln(PeakMax_mant)-PeakMax_exp*ln(2))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 10/ln(10)*ln(PeakMax_mant)-PeakMax_exp*10*ln(2)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 10/ln(10)*ln(PeakMax_mant)+10*ln(2)/ln(10)*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp) // // => RmsdB = 10/ln(10)*ln(1+x)+10*ln(2)/ln(10)*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp) // => RmsdB (Q16) = 0x457CB*ln(1+x)+0x302A3*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp) // fractional mutiply 0x457CB*ln(1+x) in Q16 PeakdB_32x2 = vqrdmulh_s32(PeakdB_32x2, vdup_n_s32(0x457CB)); // PeakdB_exp = 2*(HEADROOM+MaxShiftBits)-PeakdB_exp Peak_exp_32x2 = vsub_s32(vdup_n_s32(2 * (HEADROOM + MaxShiftBits)), Peak_exp_32x2); // PeakMaxdB final value (integer mac 0x302A3*PeakdB_exp) PeakdB_32x2 = vmla_s32(PeakdB_32x2, Peak_exp_32x2, vdup_n_s32(0x302A3)); } else { // dB(power) = 20*log10(abs) // PeakMaxdB = 20*log10(PeakMax)+20*log10(2)*(HEADROOM+MaxShiftBits) // = 20*ln(PeakMax)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 20/ln(10)*ln(PeakMax_mant*2^(-PeakMax_exp))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 20/ln(10)*(ln(PeakMax_mant)-PeakMax_exp*ln(2))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 20/ln(10)*ln(PeakMax_mant)-PeakMax_exp*20*ln(2)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 20/ln(10)*ln(PeakMax_mant)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits-PeakMax_exp) // // => RmsdB = 20/ln(10)*ln(1+x)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits-PeakMax_exp) // => RmsdB (Q16) = 0x8AF96*ln(1+x)+0x60546*(HEADROOM+MaxShiftBits-PeakMax_exp) // fractional mutiply 0x8AF96*ln(1+x) in Q16 PeakdB_32x2 = vqrdmulh_s32(PeakdB_32x2, vdup_n_s32(0x8AF96)); // PeakdB_exp = HEADROOM+MaxShiftBits-PeakdB_exp Peak_exp_32x2 = vsub_s32(vdup_n_s32(HEADROOM + MaxShiftBits), Peak_exp_32x2); // PeakMaxdB final 
value (integer mac 0x60546*PeakdB_exp) PeakdB_32x2 = vmla_s32(PeakdB_32x2, Peak_exp_32x2, vdup_n_s32(0x60546)); } PeakdB = vget_lane_s32(PeakdB_32x2, 0); } #ifdef DEBUG_LIMITER_OUTPUT if((debug_cpt_samples >= DEBUG_CPT_MIN) && (debug_cpt_samples <= DEBUG_CPT_MAX)) { char string[100]; debug_write_string("MRDC5B_LIMITER_PEAKMAX_PEAKDB\n"); sprintf(string, "PeakMax=0x%012llX, HEADROOM+MaxShiftBits=%d => PeakdB=0x%06X\n", #ifdef SAMPLES_24_BITS PeakMax & 0xFFFFFFFFFFFFLL, #else // SAMPLES_24_BITS (PeakMax >> 16) & 0xFFFFFFFFFFFFLL, #endif // SAMPLES_24_BITS HEADROOM + MaxShiftBits, PeakdB & 0xFFFFFF); debug_write_string(string); }
/*
 * NEON helper that scales, shifts and demodulates FRAMESAMPLES/2 complex
 * 16-bit samples (inre/inim) into two 32-bit output streams
 * (outre1/outre2), 8 samples per loop iteration:
 *
 *  1. Multiply by 273 in Q16 (~1/240), dividing out the normalizing
 *     constant.
 *  2. Shift by (-sh - 16): compensates the earlier scaling `sh` and
 *     removes the Q16 factor of step 1 in a single vshl.
 *  3. Demodulate against the cos/sin tables:
 *       out1 = cos*re - sin*im,   out2 = sin*re + cos*im
 *  4. Narrow the 64-bit products with >> 10, then apply a saturating
 *     doubling high-half multiply by fact (sqrt(240) in Q11, shifted up).
 *
 * inre/inim: inputs, FRAMESAMPLES/2 int16 samples read from each.
 * outre1/outre2: outputs, FRAMESAMPLES/2 int32 samples written to each.
 * sh: shift amount applied earlier in the pipeline, undone here
 *     (NOTE(review): inferred from the "<< (-sh)" comment below --
 *     confirm against the scalar reference implementation).
 *
 * NOTE(review): WebRtcIsacfix_kCosTab1/kSinTab1 are assumed to hold at
 * least FRAMESAMPLES/2 entries (consumed 8 at a time) -- confirm in the
 * table definitions.
 */
static inline void PostShiftAndDivideAndDemodulateNeon(int16_t* inre,
                                                       int16_t* inim,
                                                       int32_t* outre1,
                                                       int32_t* outre2,
                                                       int32_t sh) {
  int k;
  // Working copies of the buffer pointers; advanced inside the loop.
  int16_t* p_inre = inre;
  int16_t* p_inim = inim;
  int32_t* p_outre1 = outre1;
  int32_t* p_outre2 = outre2;
  const int16_t* kCosTab = &WebRtcIsacfix_kCosTab1[0];
  const int16_t* kSinTab = &WebRtcIsacfix_kSinTab1[0];
  // Negative lane count: vshlq_s32 with a negative shift shifts right.
  int32x4_t shift = vdupq_n_s32(-sh - 16);
  // Divide through by the normalizing constant:
  // scale all values with 1/240, i.e. with 273 in Q16.
  // 273/65536 ~= 0.0041656
  // 1/240 ~= 0.0041666
  int16x8_t scale = vdupq_n_s16(273);
  // Sqrt(240) in Q11 is round(15.49193338482967 * 2048) = 31727.
  int factQ19 = 31727 << 16;
  int32x4_t fact = vdupq_n_s32(factQ19);

  for (k = 0; k < FRAMESAMPLES/2; k += 8) {
    int16x8_t inre16x8 = vld1q_s16(p_inre);
    int16x8_t inim16x8 = vld1q_s16(p_inim);
    p_inre += 8;
    p_inim += 8;
    int16x8_t tmpr = vld1q_s16(kCosTab);
    int16x8_t tmpi = vld1q_s16(kSinTab);
    kCosTab += 8;
    kSinTab += 8;

    // By vshl and vmull, we effectively did "<< (-sh - 16)",
    // instead of "<< (-sh)" and ">> 16" as in the C code.
    int32x4_t outre1_0 = vmull_s16(vget_low_s16(inre16x8), vget_low_s16(scale));
    int32x4_t outre2_0 = vmull_s16(vget_low_s16(inim16x8), vget_low_s16(scale));
#if defined(WEBRTC_ARCH_ARM64)
    // AArch64 has *_high_* forms that consume the upper half directly.
    int32x4_t outre1_1 = vmull_high_s16(inre16x8, scale);
    int32x4_t outre2_1 = vmull_high_s16(inim16x8, scale);
#else
    int32x4_t outre1_1 = vmull_s16(vget_high_s16(inre16x8), vget_high_s16(scale));
    int32x4_t outre2_1 = vmull_s16(vget_high_s16(inim16x8), vget_high_s16(scale));
#endif

    outre1_0 = vshlq_s32(outre1_0, shift);
    outre1_1 = vshlq_s32(outre1_1, shift);
    outre2_0 = vshlq_s32(outre2_0, shift);
    outre2_1 = vshlq_s32(outre2_1, shift);

    // Demodulate and separate.
    // Widen the cos/sin factors to 32 bit for the 32x32->64 multiplies.
    int32x4_t tmpr_0 = vmovl_s16(vget_low_s16(tmpr));
    int32x4_t tmpi_0 = vmovl_s16(vget_low_s16(tmpi));
#if defined(WEBRTC_ARCH_ARM64)
    int32x4_t tmpr_1 = vmovl_high_s16(tmpr);
    int32x4_t tmpi_1 = vmovl_high_s16(tmpi);
#else
    int32x4_t tmpr_1 = vmovl_s16(vget_high_s16(tmpr));
    int32x4_t tmpi_1 = vmovl_s16(vget_high_s16(tmpi));
#endif

    // Complex rotation in 64-bit accumulators:
    //   xr = cos*re - sin*im,   xi = sin*re + cos*im.
    // Low halves of each quad first, then the high halves below.
    int64x2_t xr0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre1_0));
    int64x2_t xi0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre2_0));
    int64x2_t xr2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre1_1));
    int64x2_t xi2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre2_1));
    xr0 = vmlsl_s32(xr0, vget_low_s32(tmpi_0), vget_low_s32(outre2_0));
    xi0 = vmlal_s32(xi0, vget_low_s32(tmpi_0), vget_low_s32(outre1_0));
    xr2 = vmlsl_s32(xr2, vget_low_s32(tmpi_1), vget_low_s32(outre2_1));
    xi2 = vmlal_s32(xi2, vget_low_s32(tmpi_1), vget_low_s32(outre1_1));

#if defined(WEBRTC_ARCH_ARM64)
    int64x2_t xr1 = vmull_high_s32(tmpr_0, outre1_0);
    int64x2_t xi1 = vmull_high_s32(tmpr_0, outre2_0);
    int64x2_t xr3 = vmull_high_s32(tmpr_1, outre1_1);
    int64x2_t xi3 = vmull_high_s32(tmpr_1, outre2_1);
    xr1 = vmlsl_high_s32(xr1, tmpi_0, outre2_0);
    xi1 = vmlal_high_s32(xi1, tmpi_0, outre1_0);
    xr3 = vmlsl_high_s32(xr3, tmpi_1, outre2_1);
    xi3 = vmlal_high_s32(xi3, tmpi_1, outre1_1);
#else
    int64x2_t xr1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre1_0));
    int64x2_t xi1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre2_0));
    int64x2_t xr3 = vmull_s32(vget_high_s32(tmpr_1), vget_high_s32(outre1_1));
    int64x2_t xi3 = vmull_s32(vget_high_s32(tmpr_1), vget_high_s32(outre2_1));
    xr1 = vmlsl_s32(xr1, vget_high_s32(tmpi_0), vget_high_s32(outre2_0));
    xi1 = vmlal_s32(xi1, vget_high_s32(tmpi_0), vget_high_s32(outre1_0));
    xr3 = vmlsl_s32(xr3, vget_high_s32(tmpi_1), vget_high_s32(outre2_1));
    xi3 = vmlal_s32(xi3, vget_high_s32(tmpi_1), vget_high_s32(outre1_1));
#endif

    // Narrow the 64-bit results back to 32 bit with >> 10 ...
    outre1_0 = vcombine_s32(vshrn_n_s64(xr0, 10), vshrn_n_s64(xr1, 10));
    outre2_0 = vcombine_s32(vshrn_n_s64(xi0, 10), vshrn_n_s64(xi1, 10));
    outre1_1 = vcombine_s32(vshrn_n_s64(xr2, 10), vshrn_n_s64(xr3, 10));
    outre2_1 = vcombine_s32(vshrn_n_s64(xi2, 10), vshrn_n_s64(xi3, 10));
    // ... then apply fact via a saturating doubling high-half multiply.
    outre1_0 = vqdmulhq_s32(outre1_0, fact);
    outre2_0 = vqdmulhq_s32(outre2_0, fact);
    outre1_1 = vqdmulhq_s32(outre1_1, fact);
    outre2_1 = vqdmulhq_s32(outre2_1, fact);

    // Store two quads (8 samples) per output stream and advance.
    vst1q_s32(p_outre1, outre1_0);
    p_outre1 += 4;
    vst1q_s32(p_outre1, outre1_1);
    p_outre1 += 4;
    vst1q_s32(p_outre2, outre2_0);
    p_outre2 += 4;
    vst1q_s32(p_outre2, outre2_1);
    p_outre2 += 4;
  }
}