// Computes the dot product of two int16 vectors with NEON and writes the
// result, arithmetically right-shifted by `scaling`, to *cross_correlation.
// Eight samples are processed per SIMD iteration; the (length & 7) tail is
// handled with scalar C.
static inline void DotProductWithScaleNeon(int32_t* cross_correlation,
                                           const int16_t* vector1,
                                           const int16_t* vector2,
                                           size_t length,
                                           int scaling) {
  size_t i = 0;
  size_t len1 = length >> 3;  // Number of 8-sample NEON iterations.
  size_t len2 = length & 7;   // Remaining scalar samples.
  int64x2_t sum0 = vdupq_n_s64(0);
  int64x2_t sum1 = vdupq_n_s64(0);

  for (i = len1; i > 0; i -= 1) {
    int16x8_t seq1_16x8 = vld1q_s16(vector1);
    int16x8_t seq2_16x8 = vld1q_s16(vector2);
#if defined(WEBRTC_ARCH_ARM64)
    // AArch64 provides vmull_high_s16, saving the explicit vget_high_s16.
    int32x4_t tmp0 = vmull_s16(vget_low_s16(seq1_16x8),
                               vget_low_s16(seq2_16x8));
    int32x4_t tmp1 = vmull_high_s16(seq1_16x8, seq2_16x8);
#else
    int32x4_t tmp0 = vmull_s16(vget_low_s16(seq1_16x8),
                               vget_low_s16(seq2_16x8));
    int32x4_t tmp1 = vmull_s16(vget_high_s16(seq1_16x8),
                               vget_high_s16(seq2_16x8));
#endif
    // Pairwise-accumulate the 32-bit products into 64-bit lanes so that long
    // inputs cannot overflow the accumulators.
    sum0 = vpadalq_s32(sum0, tmp0);
    sum1 = vpadalq_s32(sum1, tmp1);
    vector1 += 8;
    vector2 += 8;
  }

  // Calculate the rest of the samples.
  int64_t sum_res = 0;
  for (i = len2; i > 0; i -= 1) {
    sum_res += WEBRTC_SPL_MUL_16_16(*vector1, *vector2);
    vector1++;
    vector2++;
  }

  sum0 = vaddq_s64(sum0, sum1);
#if defined(WEBRTC_ARCH_ARM64)
  // Across-vector add of both 64-bit lanes, then apply the scaling shift.
  int64_t sum2 = vaddvq_s64(sum0);
  *cross_correlation = (int32_t)((sum2 + sum_res) >> scaling);
#else
  // ARMv7 has no across-vector 64-bit add: fold the lanes manually and use a
  // negative vector shift amount, i.e. vshl by -scaling == shift right.
  int64x1_t shift = vdup_n_s64(-scaling);
  int64x1_t sum2 = vadd_s64(vget_low_s64(sum0), vget_high_s64(sum0));
  sum2 = vadd_s64(sum2, vdup_n_s64(sum_res));
  sum2 = vshl_s64(sum2, shift);
  vst1_lane_s32(cross_correlation, vreinterpret_s32_s64(sum2), 0);
#endif
}
/* Compile-time exercise of the vdupq_n_s64 intrinsic: broadcasts a scalar
 * int64 into both lanes of an int64x2_t.  The result is intentionally
 * unused; the test only needs the statement to compile.
 * Fix: initialize the argument — reading an uninitialized automatic
 * variable is undefined behavior even when the value is discarded. */
void test_vdupQ_ns64 (void)
{
  int64x2_t out_int64x2_t;
  int64_t arg0_int64_t = 0;

  out_int64x2_t = vdupq_n_s64 (arg0_int64_t);
  (void) out_int64x2_t;  /* Silence unused-variable warnings. */
}
// Returns the sum of squared differences between a quantized coefficient
// block (coeff) and its dequantized counterpart (dqcoeff) as a 64-bit
// "block error".  block_size must be a positive multiple of 8; eight
// elements are consumed per loop iteration.
int64_t av1_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
                                int block_size) {
  int64x2_t error = vdupq_n_s64(0);

  assert(block_size >= 8);
  assert((block_size % 8) == 0);

  do {
    const int16x8_t c = vld1q_s16(coeff);
    const int16x8_t d = vld1q_s16(dqcoeff);
    const int16x8_t diff = vsubq_s16(c, d);
    const int16x4_t diff_lo = vget_low_s16(diff);
    const int16x4_t diff_hi = vget_high_s16(diff);
    // diff is 15-bits, the squares 30, so we can store 2 in 31-bits before
    // accumulating them in 64-bits.
    const int32x4_t err0 = vmull_s16(diff_lo, diff_lo);
    const int32x4_t err1 = vmlal_s16(err0, diff_hi, diff_hi);
    // Widen the four 32-bit partial sums into the 64-bit accumulator.
    const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));
    error = vaddq_s64(error, err2);
    coeff += 8;
    dqcoeff += 8;
    block_size -= 8;
  } while (block_size != 0);

  // Fold the two 64-bit lanes into the scalar return value.
  return vgetq_lane_s64(error, 0) + vgetq_lane_s64(error, 1);
}
/* Runtime check for vdupq_n_s64: after broadcasting a 64-bit scalar, both
 * lanes of the resulting vector must equal that scalar; abort() on any
 * mismatch. */
int main (void)
{
  const int64_t expected = (int64_t) 0xdeadbeef;
  int64x2_t dup = {0, 0};

  dup = vdupq_n_s64 (expected);

  if (vgetq_lane_s64 (dup, 0) != expected)
    abort();
  if (vgetq_lane_s64 (dup, 1) != expected)
    abort();

  return 0;
}
// Clang CodeGen test: verifies that vdupq_n_s64 lowers to a single DUP of a
// 64-bit general-purpose register into both vector lanes.  The "CHECK"
// comments below are FileCheck directives and must not be edited.
int64x2_t test_vdupq_n_s64(int64_t v1) {
  // CHECK: test_vdupq_n_s64
  return vdupq_n_s64(v1);
  // CHECK: dup {{v[0-9]+}}.2d, {{x[0-9]+}}
}
int16_t* __restrict scale) { int i = 0; int16_t scaling = 0; uint32_t temp = 0; int64_t prod = 0; int64_t prod_tail = 0; assert(n % 4 == 0); assert(n >= 8); // Calculate r[0]. int16x4_t x0_v; int32x4_t tmpa0_v; int64x2_t tmpb_v; tmpb_v = vdupq_n_s64(0); const int16_t* x_start = x; const int16_t* x_end0 = x_start + n; while (x_start < x_end0) { x0_v = vld1_s16(x_start); tmpa0_v = vmull_s16(x0_v, x0_v); tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v); x_start += 4; } #ifdef WEBRTC_ARCH_ARM64 prod = vaddvq_s64(tmpb_v); #else prod = vget_lane_s64(vadd_s64(vget_low_s64(tmpb_v), vget_high_s64(tmpb_v)), 0); #endif
/* NEON path of the VP8 temporal denoiser for one 16x16 luma macroblock.
 * Blends `sig` toward the motion-compensated running average
 * `mc_running_avg_y`, writing the result to `running_avg_y`, and returns
 * COPY_BLOCK when the accumulated per-pixel adjustment exceeds the allowed
 * threshold (optionally after a weaker second filtering pass).
 *
 * NOTE(review): if the weaker second pass brings sum_diff back within the
 * threshold, control falls off the end of this non-void function — the
 * final "return FILTER_BLOCK;" (and the copy of running_avg_y back into
 * sig) present in upstream libvpx appears to be missing here; verify
 * against the upstream source. */
int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y,
                             int mc_running_avg_y_stride,
                             unsigned char *running_avg_y,
                             int running_avg_y_stride, unsigned char *sig,
                             int sig_stride, unsigned int motion_magnitude,
                             int increase_denoising) {
  /* If motion_magnitude is small, making the denoiser more aggressive by
   * increasing the adjustment for each level, level1 adjustment is
   * increased, the deltas stay the same. */
  int shift_inc =
      (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
          ? 1
          : 0;
  const uint8x16_t v_level1_adjustment = vmovq_n_u8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
  const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
  const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
  const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
  const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
  const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
  /* Signed running total of all applied adjustments (saturating adds). */
  int64x2_t v_sum_diff_total = vdupq_n_s64(0);

  /* Go over lines. */
  int r;
  for (r = 0; r < 16; ++r) {
    /* Load inputs. */
    const uint8x16_t v_sig = vld1q_u8(sig);
    const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

    /* Calculate absolute difference and sign masks. */
    const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
    const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
    const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);

    /* Figure out which level that put us in. */
    const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
    const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
    const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);

    /* Calculate absolute adjustments for level 1, 2 and 3. */
    const uint8x16_t v_level2_adjustment =
        vandq_u8(v_level2_mask, v_delta_level_1_and_2);
    const uint8x16_t v_level3_adjustment =
        vandq_u8(v_level3_mask, v_delta_level_2_and_3);
    const uint8x16_t v_level1and2_adjustment =
        vaddq_u8(v_level1_adjustment, v_level2_adjustment);
    const uint8x16_t v_level1and2and3_adjustment = vaddq_u8(
        v_level1and2_adjustment, v_level3_adjustment);

    /* Figure adjustment absolute value by selecting between the absolute
     * difference if in level0 or the value for level 1, 2 and 3. */
    const uint8x16_t v_abs_adjustment =
        vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);

    /* Calculate positive and negative adjustments. Apply them to the signal
     * and accumulate them. Adjustments are less than eight and the maximum
     * sum of them (7 * 16) can fit in a signed char. */
    const uint8x16_t v_pos_adjustment =
        vandq_u8(v_diff_pos_mask, v_abs_adjustment);
    const uint8x16_t v_neg_adjustment =
        vandq_u8(v_diff_neg_mask, v_abs_adjustment);
    uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
    v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);

    /* Store results. */
    vst1q_u8(running_avg_y, v_running_avg_y);

    /* Sum all the accumulators to have the sum of all pixel differences
     * for this macroblock. */
    {
      const int8x16_t v_sum_diff =
          vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
                    vreinterpretq_s8_u8(v_neg_adjustment));
      /* Pairwise widening adds: 8-bit -> 16 -> 32 -> 64. */
      const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
      const int32x4_t fedc_ba98_7654_3210 =
          vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
      const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
      v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
    }

    /* Update pointers for next iteration. */
    sig += sig_stride;
    mc_running_avg_y += mc_running_avg_y_stride;
    running_avg_y += running_avg_y_stride;
  }

  /* Too much adjustments => copy block. */
  {
    int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                            vget_low_s64(v_sum_diff_total));
    int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
    int sum_diff_thresh = SUM_DIFF_THRESHOLD;

    if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
    if (sum_diff > sum_diff_thresh) {
      // Before returning to copy the block (i.e., apply no denoising),
      // checK if we can still apply some (weaker) temporal filtering to
      // this block, that would otherwise not be denoised at all. Simplest
      // is to apply an additional adjustment to running_avg_y to bring it
      // closer to sig. The adjustment is capped by a maximum delta, and
      // chosen such that in most cases the resulting sum_diff will be
      // within the accceptable range given by sum_diff_thresh.

      // The delta is set by the excess of absolute pixel diff over the
      // threshold.
      int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
      // Only apply the adjustment for max delta up to 3.
      if (delta < 4) {
        const uint8x16_t k_delta = vmovq_n_u8(delta);
        /* Rewind all pointers to the top of the macroblock. */
        sig -= sig_stride * 16;
        mc_running_avg_y -= mc_running_avg_y_stride * 16;
        running_avg_y -= running_avg_y_stride * 16;
        for (r = 0; r < 16; ++r) {
          uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
          const uint8x16_t v_sig = vld1q_u8(sig);
          const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

          /* Calculate absolute difference and sign masks. */
          const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
          const uint8x16_t v_diff_pos_mask =
              vcltq_u8(v_sig, v_mc_running_avg_y);
          const uint8x16_t v_diff_neg_mask =
              vcgtq_u8(v_sig, v_mc_running_avg_y);
          // Clamp absolute difference to delta to get the adjustment.
          const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));

          const uint8x16_t v_pos_adjustment =
              vandq_u8(v_diff_pos_mask, v_abs_adjustment);
          const uint8x16_t v_neg_adjustment =
              vandq_u8(v_diff_neg_mask, v_abs_adjustment);

          /* Note: adjustment signs are inverted relative to the first pass,
           * pulling running_avg_y back toward sig. */
          v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
          v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);

          /* Store results. */
          vst1q_u8(running_avg_y, v_running_avg_y);

          {
            const int8x16_t v_sum_diff =
                vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
                          vreinterpretq_s8_u8(v_pos_adjustment));

            const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
            const int32x4_t fedc_ba98_7654_3210 =
                vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
            const int64x2_t fedcba98_76543210 =
                vpaddlq_s32(fedc_ba98_7654_3210);

            v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
          }
          /* Update pointers for next iteration. */
          sig += sig_stride;
          mc_running_avg_y += mc_running_avg_y_stride;
          running_avg_y += running_avg_y_stride;
        }
        {
          // Update the sum of all pixel differences of this MB.
          x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                        vget_low_s64(v_sum_diff_total));
          sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);

          if (sum_diff > sum_diff_thresh) {
            return COPY_BLOCK;
          }
        }
      } else {
        return COPY_BLOCK;
      }
    }
  }
}
// Type-dispatch overload: routes a signed 64-bit scalar to the suffixed
// NEON broadcast intrinsic, so generic code can call vdupq_n() uniformly.
inline int64x2_t vdupq_n(const s64 & val)
{
    return vdupq_n_s64(val);
}
/* NEON implementation of SILK's warped autocorrelation in fixed point.
 * Produces order+1 correlation values in `corr` plus a scaling exponent in
 * `*scale`.  Falls back to the C implementation when the configuration is
 * outside what this kernel supports.  Internally works with fixed-point
 * accumulators in QC format and input samples promoted to QS format
 * (QS/QC are compile-time constants from the enclosing file). */
void silk_warped_autocorrelation_FIX_neon(
          opus_int32                *corr,                                  /* O    Result [order + 1]                                                          */
          opus_int                  *scale,                                 /* O    Scaling of the correlation vector                                           */
    const opus_int16                *input,                                 /* I    Input data to correlate                                                     */
    const opus_int                  warping_Q16,                            /* I    Warping coefficient                                                         */
    const opus_int                  length,                                 /* I    Length of input                                                             */
    const opus_int                  order                                   /* I    Correlation order (even)                                                    */
)
{
    if( ( MAX_SHAPE_LPC_ORDER > 24 ) || ( order < 6 ) ) {
        /* Unsupported configuration: defer to the portable C version. */
        silk_warped_autocorrelation_FIX_c( corr, scale, input, warping_Q16, length, order );
    } else {
        opus_int n, i, lsh;
        opus_int64 corr_QC[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 }; /* In reverse order */
        opus_int64 corr_QC_orderT;
        int64x2_t lsh_s64x2;
        const opus_int orderT = ( order + 3 ) & ~3;  /* order rounded up to a multiple of 4 */
        opus_int64 *corr_QCT;
        opus_int32 *input_QS;
        VARDECL( opus_int32, input_QST );
        VARDECL( opus_int32, state );
        SAVE_STACK;

        /* Order must be even */
        silk_assert( ( order & 1 ) == 0 );
        silk_assert( 2 * QS - QC >= 0 );

        ALLOC( input_QST, length + 2 * MAX_SHAPE_LPC_ORDER, opus_int32 );

        input_QS = input_QST;
        /* input_QS has zero paddings in the beginning and end. */
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;

        /* Loop over samples: widen int16 input into QS-format int32. */
        for( n = 0; n < length - 7; n += 8, input_QS += 8 ) {
            const int16x8_t t0_s16x4 = vld1q_s16( input + n );
            vst1q_s32( input_QS + 0, vshll_n_s16( vget_low_s16( t0_s16x4 ), QS ) );
            vst1q_s32( input_QS + 4, vshll_n_s16( vget_high_s16( t0_s16x4 ), QS ) );
        }
        for( ; n < length; n++, input_QS++ ) {
            input_QS[ 0 ] = silk_LSHIFT32( (opus_int32)input[ n ], QS );
        }
        /* Trailing zero padding. */
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS += 4;
        vst1q_s32( input_QS, vdupq_n_s32( 0 ) );
        input_QS = input_QST + MAX_SHAPE_LPC_ORDER - orderT;

        /* The following loop runs ( length + order ) times, with ( order ) extra epilogues.                  */
        /* The zero paddings in input_QS guarantee corr_QC's correctness even with the extra epilogues.       */
        /* The values of state_QS will be polluted by the extra epilogues, however they are temporary values. */

        /* Keep the C code here to help understand the intrinsics optimization. */
        /*
        {
            opus_int32 state_QS[ 2 ][ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 };
            opus_int32 *state_QST[ 3 ];
            state_QST[ 0 ] = state_QS[ 0 ];
            state_QST[ 1 ] = state_QS[ 1 ];
            for( n = 0; n < length + order; n++, input_QS++ ) {
                state_QST[ 0 ][ orderT ] = input_QS[ orderT ];
                for( i = 0; i < orderT; i++ ) {
                    corr_QC[ i ] += silk_RSHIFT64( silk_SMULL( state_QST[ 0 ][ i ], input_QS[ i ] ), 2 * QS - QC );
                    state_QST[ 1 ][ i ] = silk_SMLAWB( state_QST[ 1 ][ i + 1 ], state_QST[ 0 ][ i ] - state_QST[ 0 ][ i + 1 ], warping_Q16 );
                }
                state_QST[ 2 ] = state_QST[ 0 ];
                state_QST[ 0 ] = state_QST[ 1 ];
                state_QST[ 1 ] = state_QST[ 2 ];
            }
        }
        */

        {
            const int32x4_t warping_Q16_s32x4 = vdupq_n_s32( warping_Q16 << 15 );
            const opus_int32 *in = input_QS + orderT;
            opus_int o = orderT;
            int32x4_t state_QS_s32x4[ 3 ][ 2 ];

            ALLOC( state, length + orderT, opus_int32 );
            state_QS_s32x4[ 2 ][ 1 ] = vdupq_n_s32( 0 );

            /* Calculate 8 taps of all inputs in each loop. */
            do {
                state_QS_s32x4[ 0 ][ 0 ] = state_QS_s32x4[ 0 ][ 1 ] =
                state_QS_s32x4[ 1 ][ 0 ] = state_QS_s32x4[ 1 ][ 1 ] = vdupq_n_s32( 0 );
                n = 0;
                do {
                    calc_corr( input_QS + n, corr_QC, o - 8, state_QS_s32x4[ 0 ][ 0 ] );
                    calc_corr( input_QS + n, corr_QC, o - 4, state_QS_s32x4[ 0 ][ 1 ] );
                    state_QS_s32x4[ 2 ][ 1 ] = vld1q_s32( in + n );
                    vst1q_lane_s32( state + n, state_QS_s32x4[ 0 ][ 0 ], 0 );
                    /* Shift the state lanes left by one element. */
                    state_QS_s32x4[ 2 ][ 0 ] = vextq_s32( state_QS_s32x4[ 0 ][ 0 ], state_QS_s32x4[ 0 ][ 1 ], 1 );
                    state_QS_s32x4[ 2 ][ 1 ] = vextq_s32( state_QS_s32x4[ 0 ][ 1 ], state_QS_s32x4[ 2 ][ 1 ], 1 );
                    state_QS_s32x4[ 0 ][ 0 ] = calc_state( state_QS_s32x4[ 0 ][ 0 ], state_QS_s32x4[ 2 ][ 0 ], state_QS_s32x4[ 1 ][ 0 ], warping_Q16_s32x4 );
                    state_QS_s32x4[ 0 ][ 1 ] = calc_state( state_QS_s32x4[ 0 ][ 1 ], state_QS_s32x4[ 2 ][ 1 ], state_QS_s32x4[ 1 ][ 1 ], warping_Q16_s32x4 );
                    state_QS_s32x4[ 1 ][ 0 ] = state_QS_s32x4[ 2 ][ 0 ];
                    state_QS_s32x4[ 1 ][ 1 ] = state_QS_s32x4[ 2 ][ 1 ];
                } while( ++n < ( length + order ) );
                in = state;
                o -= 8;
            } while( o > 4 );

            if( o ) {
                /* Calculate the last 4 taps of all inputs. */
                opus_int32 *stateT = state;
                silk_assert( o == 4 );
                state_QS_s32x4[ 0 ][ 0 ] = state_QS_s32x4[ 1 ][ 0 ] = vdupq_n_s32( 0 );
                n = length + order;
                do {
                    calc_corr( input_QS, corr_QC, 0, state_QS_s32x4[ 0 ][ 0 ] );
                    state_QS_s32x4[ 2 ][ 0 ] = vld1q_s32( stateT );
                    vst1q_lane_s32( stateT, state_QS_s32x4[ 0 ][ 0 ], 0 );
                    state_QS_s32x4[ 2 ][ 0 ] = vextq_s32( state_QS_s32x4[ 0 ][ 0 ], state_QS_s32x4[ 2 ][ 0 ], 1 );
                    state_QS_s32x4[ 0 ][ 0 ] = calc_state( state_QS_s32x4[ 0 ][ 0 ], state_QS_s32x4[ 2 ][ 0 ], state_QS_s32x4[ 1 ][ 0 ], warping_Q16_s32x4 );
                    state_QS_s32x4[ 1 ][ 0 ] = state_QS_s32x4[ 2 ][ 0 ];
                    input_QS++;
                    stateT++;
                } while( --n );
            }
        }

        {
            /* Energy term corr_QC[ orderT ]: sum of squared inputs. */
            const opus_int16 *inputT = input;
            int32x4_t t_s32x4;
            int64x1_t t_s64x1;
            int64x2_t t_s64x2 = vdupq_n_s64( 0 );
            for( n = 0; n <= length - 8; n += 8 ) {
                int16x8_t input_s16x8 = vld1q_s16( inputT );
                t_s32x4 = vmull_s16( vget_low_s16( input_s16x8 ), vget_low_s16( input_s16x8 ) );
                t_s32x4 = vmlal_s16( t_s32x4, vget_high_s16( input_s16x8 ), vget_high_s16( input_s16x8 ) );
                t_s64x2 = vaddw_s32( t_s64x2, vget_low_s32( t_s32x4 ) );
                t_s64x2 = vaddw_s32( t_s64x2, vget_high_s32( t_s32x4 ) );
                inputT += 8;
            }
            t_s64x1 = vadd_s64( vget_low_s64( t_s64x2 ), vget_high_s64( t_s64x2 ) );
            corr_QC_orderT = vget_lane_s64( t_s64x1, 0 );
            for( ; n < length; n++ ) {
                corr_QC_orderT += silk_SMULL( input[ n ], input[ n ] );
            }
            corr_QC_orderT = silk_LSHIFT64( corr_QC_orderT, QC );
            corr_QC[ orderT ] = corr_QC_orderT;
        }

        corr_QCT = corr_QC + orderT - order;
        /* Choose a left-shift that normalizes the largest correlation. */
        lsh = silk_CLZ64( corr_QC_orderT ) - 35;
        lsh = silk_LIMIT( lsh, -12 - QC, 30 - QC );
        *scale = -( QC + lsh );
        silk_assert( *scale >= -30 && *scale <= 12 );
        lsh_s64x2 = vdupq_n_s64( lsh );
        /* Scale and narrow four correlations per iteration, writing them out
         * in reverse order. */
        for( i = 0; i <= order - 3; i += 4 ) {
            int32x4_t corr_s32x4;
            int64x2_t corr_QC0_s64x2, corr_QC1_s64x2;
            corr_QC0_s64x2 = vld1q_s64( corr_QCT + i );
            corr_QC1_s64x2 = vld1q_s64( corr_QCT + i + 2 );
            corr_QC0_s64x2 = vshlq_s64( corr_QC0_s64x2, lsh_s64x2 );
            corr_QC1_s64x2 = vshlq_s64( corr_QC1_s64x2, lsh_s64x2 );
            corr_s32x4 = vcombine_s32( vmovn_s64( corr_QC1_s64x2 ), vmovn_s64( corr_QC0_s64x2 ) );
            corr_s32x4 = vrev64q_s32( corr_s32x4 );
            vst1q_s32( corr + order - i - 3, corr_s32x4 );
        }
        if( lsh >= 0 ) {
            for( ; i < order + 1; i++ ) {
                corr[ order - i ] = (opus_int32)silk_CHECK_FIT32( silk_LSHIFT64( corr_QCT[ i ], lsh ) );
            }
        } else {
            for( ; i < order + 1; i++ ) {
                corr[ order - i ] = (opus_int32)silk_CHECK_FIT32( silk_RSHIFT64( corr_QCT[ i ], -lsh ) );
            }
        }
        silk_assert( corr_QCT[ order ] >= 0 ); /* If breaking, decrease QC*/
        RESTORE_STACK;
    }

#ifdef OPUS_CHECK_ASM
    {
        /* Cross-check the NEON result against the reference C version. */
        opus_int32 corr_c[ MAX_SHAPE_LPC_ORDER + 1 ];
        opus_int   scale_c;
        silk_warped_autocorrelation_FIX_c( corr_c, &scale_c, input, warping_Q16, length, order );
        silk_assert( !memcmp( corr_c, corr, sizeof( corr_c[ 0 ] ) * ( order + 1 ) ) );
        silk_assert( scale_c == *scale );
    }
#endif
}
void mdrc5b_apply_limiter(MDRC5B_LOCAL_STRUCT_T *HeapPtr) { unsigned int LaIdx; unsigned int NumMainCh; unsigned int Samples; unsigned int ch, k, n; MMlong *Ptr; MMlong *Ptr2; MMlong *MemOutPtr; MMshort PeakdB; MMlong PeakMax; int RmsMeasure; MMshort LimiterAtCoef; MMshort LimiterReCoef; MMshort LimiterGainMant[MDRC5B_BLOCK_SIZE + 1]; MMshort LimiterGainExp; MMshort LimiterTargetGaindB; unsigned int LimiterHoldRem; unsigned int LimiterHtSamp; MMshort Exp, TargetGain; MMshort MaxShiftBits; unsigned int lookahead_len = (unsigned int) HeapPtr->LimiterLALen; unsigned int cpt1, cpt2; uint32x2x2_t Temp_u32x2x2; uint32x2_t Ldbits_u32x2, Ldbits2_u32x2; uint32x2_t bsl_u32x2; int32x2_t LimGainMant_32x2; int64x2_t TempX_64x2, MemOut_64x2; int64x2_t Tmp_64x2; int64x2_t LimiterGainExp_64x2, sample_64x2; int64x1_t TempX_64x1, sample_64x1; int32_t *LimiterGainMant_ptr; int32x2_t Tmp_32x2, Ldbits_32x2, n_32x2; int32x2_t TempX_low_32x2, TempX_high_32x2; int32x2x2_t Tmp_32x2x2; int64x1_t Peak_64x1, PeakMax_64x1, Tmp_64x1, diffX_64x1; int64x1_t Peak_scale_pow_64x1, Peak_scale_64x1, Zero_s64x1; int64x1_t MaxShiftBits_neg_64x1, MaxShiftBits_hd_64x1; int64x2_t diffX_64x2; uint64x1_t bsl_u64x1; int32x2_t LimiterPeakCoef_32x2, diffX_low_32x2, diffX_high_32x2; int32x2_t TargetGain_32x2; uint32x2x2_t Peak_u32x2x2; uint32x2_t Peak_exp_u32x2, Peak_exp2_u32x2, Peak_mant_u32x2; int32x2_t x_32x2, xn_32x2, PeakdB_32x2, Peak_exp_32x2; int32x2_t LimiterTargetGaindB_32x2, Exp_32x2, LimiterCoef_32x2; int32x4_t Tmp_32x4; START_PMU_MEASURE(PMU_MEASURE_MRDC5B_APPLY_LIMITER) START_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT) Samples = (unsigned int) HeapPtr->BlockSize; NumMainCh = (unsigned int) HeapPtr->NumMainCh; TempX_64x2 = vdupq_n_s64(0); for(ch = 0; ch < NumMainCh; ch++) { Ptr = HeapPtr->MainInBuf[ch]; // compute the number of bits needs to be shifted to avoid overflow for(k = (Samples >> 1); k > 0; k--) { sample_64x2 = vld1q_s64(Ptr); Ptr +=2; sample_64x2 = veorq_s64(sample_64x2, 
vshrq_n_s64(sample_64x2, 63)); TempX_64x2 = vorrq_s64(TempX_64x2, sample_64x2); } if(Samples & 1) { sample_64x1 = vld1_s64(Ptr); sample_64x1 = veor_s64(sample_64x1, vshr_n_s64(sample_64x1, 63)); TempX_64x2 = vorrq_s64(TempX_64x2, vcombine_s64(sample_64x1, sample_64x1)); } } TempX_64x1 = vorr_s64(vget_low_s64(TempX_64x2), vget_high_s64(TempX_64x2)); Temp_u32x2x2 = vuzp_u32(vreinterpret_u32_s64(TempX_64x1), vreinterpret_u32_s64(TempX_64x1)); bsl_u32x2 = vceq_u32(Temp_u32x2x2.val[1], vdup_n_u32(0)); // MSB == 0 ? // use clz instead of cls because we are sure that input value is positive // and because cls(LSB) could be wrong (if MSB is equal to 0 and bit 31 of LSL is 1) // thus clz result will be 1 more than cls result (that's why you may see (Ldbits - 1) // instead of Ldbits below) Ldbits_u32x2 = vadd_u32(vclz_u32(Temp_u32x2x2.val[0]), vdup_n_u32(32)); // clz(LSB)+32 Ldbits2_u32x2 = vclz_u32(Temp_u32x2x2.val[1]); // clz(MSB) Ldbits_u32x2 = vbsl_u32(bsl_u32x2, Ldbits_u32x2, Ldbits2_u32x2); // MSB == 0 ? clz(LSB)+32 : clz(MSB) bsl_u32x2 = vceq_u32(Ldbits_u32x2, vdup_n_u32(64)); // Ldbits == 64 ? (i.e. TempX == 0 ?) // the aim of MaxShiftBits is that sample will be shifted so that it occupies // 24 significant bits for 24 bits samples or 32 significant bits for 32 bits samples // but we are in 64 bits architecture on CA9/NEON // so we must right shift of ((64 - 24) - (Ldbits - 1)) bits for 24 bits samples // or of ((64 - 32) - (Ldbits - 1)) bits for 32 bits samples // and we add 1 because it was done this way on MMDSP (I don't know why !) 
#ifdef SAMPLES_24_BITS // MaxShiftBits = ((64 - 24) - (Ldbits - 1)) + 1 // = 42 - Ldbits Ldbits_32x2 = vsub_s32(vdup_n_s32(42), vreinterpret_s32_u32(Ldbits_u32x2)); #else // SAMPLES_24_BITS // MaxShiftBits = ((64 - 32) - (Ldbits - 1)) + 1 // = 34 - Ldbits Ldbits_32x2 = vsub_s32(vdup_n_s32(34), vreinterpret_s32_u32(Ldbits_u32x2)); #endif // SAMPLES_24_BITS Ldbits_32x2 = vmax_s32(vdup_n_s32(1), Ldbits_32x2); Ldbits_32x2 = vbsl_s32(bsl_u32x2, vdup_n_s32(1), Ldbits_32x2); // if(TempX == 0) Ldbits = 1 MaxShiftBits = vget_lane_s32(Ldbits_32x2, 0); STOP_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT) #ifdef DEBUG_LIMITER_OUTPUT if((debug_cpt_samples >= DEBUG_CPT_MIN) && (debug_cpt_samples <= DEBUG_CPT_MAX)) { char string[100]; debug_write_string("MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT\n"); sprintf(string, "MaxShiftBits=%d\n", MaxShiftBits); debug_write_string(string); } #endif // DEBUG_LIMITER_OUTPUT START_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_INSERT_NEW_SUBBAND) // insert the new subband samples into the lookahead buffers RmsMeasure = HeapPtr->Limiter.RmsMeasure; LaIdx = (unsigned int) HeapPtr->LimiterLaIdx; if(LaIdx + Samples >= lookahead_len) { cpt1 = lookahead_len - LaIdx; cpt2 = Samples - cpt1; // update index HeapPtr->LimiterLaIdx = (int) cpt2; } else { cpt1 = Samples; cpt2 = 0; // update index HeapPtr->LimiterLaIdx = (int) (LaIdx + Samples); } LimiterPeakCoef_32x2 = vdup_n_s32(HeapPtr->LimiterPeakAtCoef); // LimiterPeakAtCoef, LimiterPeakAtCoef LimiterPeakCoef_32x2 = vset_lane_s32(HeapPtr->LimiterPeakReCoef, LimiterPeakCoef_32x2, 1); // LimiterPeakAtCoef, LimiterPeakReCoef Peak_scale_64x1 = vdup_n_s64(HeapPtr->PrevShiftBits - MaxShiftBits); Peak_scale_pow_64x1 = vshl_n_s64(Peak_scale_64x1, 1); MaxShiftBits_neg_64x1 = vdup_n_s64(-MaxShiftBits); #ifdef SAMPLES_24_BITS MaxShiftBits_hd_64x1 = vdup_n_s64(24 - MaxShiftBits); #else // SAMPLES_24_BITS MaxShiftBits_hd_64x1 = vdup_n_s64(32 - MaxShiftBits); #endif // SAMPLES_24_BITS PeakMax_64x1 = 
vdup_n_s64(0); for(ch = 0; ch < NumMainCh; ch++) { Ptr = HeapPtr->MainInBuf[ch]; Ptr2 = HeapPtr->LimiterLABuf[ch] + LaIdx; // go to the first valid sample Peak_64x1 = vdup_n_s64(HeapPtr->LimiterPeak[ch]); if(RmsMeasure) { // compensate Peak according to the previous shift bits Peak_64x1 = vqrshl_s64(Peak_64x1, Peak_scale_pow_64x1); // neg value => shift right rounding // rms measure for(k = cpt1; k > 0; k--) { Tmp_64x1 = vld1_s64(Ptr); Ptr++; vst1_s64(Ptr2, Tmp_64x1); Ptr2++; Tmp_64x1 = vqrshl_s64(Tmp_64x1, MaxShiftBits_neg_64x1); Tmp_64x2 = vcombine_s64(Tmp_64x1, Tmp_64x1); Tmp_32x2x2 = vuzp_s32(vget_low_s32(vreinterpretq_s32_s64(Tmp_64x2)), vget_high_s32(vreinterpretq_s32_s64(Tmp_64x2))); Tmp_32x2 = Tmp_32x2x2.val[0]; // LSB of Tmp_64x2 (MSB is dummy) TempX_64x2 = vqdmull_s32(Tmp_32x2, Tmp_32x2); TempX_64x1 = vget_low_s64(TempX_64x2); diffX_64x1 = vqsub_s64(Peak_64x1, TempX_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63)); // sign(diffX) diffX_64x2 = vcombine_s64(diffX_64x1, diffX_64x1); diffX_low_32x2 = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32); // wextract_l(diffX), wextract_l(diffX) diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32); // wround_L(diffX), wround_L(diffX) Tmp_64x2 = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2)); // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef) Tmp_64x2 = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2); // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef)) Tmp_64x2 = vqaddq_s64(TempX_64x2, Tmp_64x2); Peak_64x1 = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2)); Tmp_64x1 = vqsub_s64(Peak_64x1, PeakMax_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Peak_64x1 - PeakMax_64x1) PeakMax_64x1 = vbsl_s64(bsl_u64x1, 
PeakMax_64x1, Peak_64x1); } Ptr2 = HeapPtr->LimiterLABuf[ch]; for(k = cpt2; k > 0; k--) { Tmp_64x1 = vld1_s64(Ptr); Ptr++; vst1_s64(Ptr2, Tmp_64x1); Ptr2++; Tmp_64x1 = vqrshl_s64(Tmp_64x1, MaxShiftBits_neg_64x1); Tmp_64x2 = vcombine_s64(Tmp_64x1, Tmp_64x1); Tmp_32x2x2 = vuzp_s32(vget_low_s32(vreinterpretq_s32_s64(Tmp_64x2)), vget_high_s32(vreinterpretq_s32_s64(Tmp_64x2))); Tmp_32x2 = Tmp_32x2x2.val[0]; // LSB of Tmp_64x2 (MSB is dummy) TempX_64x2 = vqdmull_s32(Tmp_32x2, Tmp_32x2); TempX_64x1 = vget_low_s64(TempX_64x2); diffX_64x1 = vqsub_s64(Peak_64x1, TempX_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63)); // sign(diffX) diffX_64x2 = vcombine_s64(diffX_64x1, diffX_64x1); diffX_low_32x2 = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32); // wextract_l(diffX), wextract_l(diffX) diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32); // wround_L(diffX), wround_L(diffX) Tmp_64x2 = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2)); // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef) Tmp_64x2 = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2); // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef)) Tmp_64x2 = vqaddq_s64(TempX_64x2, Tmp_64x2); Peak_64x1 = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2)); Tmp_64x1 = vqsub_s64(Peak_64x1, PeakMax_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Peak_64x1 - PeakMax_64x1) PeakMax_64x1 = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1); } } else { // compensate Peak according to the previous shift bits Peak_64x1 = vqrshl_s64(Peak_64x1, Peak_scale_64x1); // amplitude measure Zero_s64x1 = vdup_n_s64(0); for(k = cpt1; k > 0; k--) { Tmp_64x1 = vld1_s64(Ptr); Ptr++; vst1_s64(Ptr2, Tmp_64x1); Ptr2++; bsl_u64x1 = 
vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Tmp_64x1) TempX_64x1 = vqsub_s64(Zero_s64x1, Tmp_64x1); // -Tmp_64x1 TempX_64x1 = vbsl_s64(bsl_u64x1, TempX_64x1, Tmp_64x1); TempX_64x1 = vqrshl_s64(TempX_64x1, MaxShiftBits_hd_64x1); TempX_64x2 = vcombine_s64(TempX_64x1, TempX_64x1); diffX_64x1 = vqsub_s64(Peak_64x1, TempX_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63)); // sign(diffX) diffX_64x2 = vcombine_s64(diffX_64x1, diffX_64x1); diffX_low_32x2 = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32); // wextract_l(diffX), wextract_l(diffX) diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32); // wround_L(diffX), wround_L(diffX) Tmp_64x2 = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2)); // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef) Tmp_64x2 = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2); // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef)) Tmp_64x2 = vqaddq_s64(TempX_64x2, Tmp_64x2); Peak_64x1 = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2)); Tmp_64x1 = vqsub_s64(Peak_64x1, PeakMax_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Peak_64x1 - PeakMax_64x1) PeakMax_64x1 = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1); } Ptr2 = HeapPtr->LimiterLABuf[ch]; for(k = cpt2; k > 0; k--) { Tmp_64x1 = vld1_s64(Ptr); Ptr++; vst1_s64(Ptr2, Tmp_64x1); Ptr2++; bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Tmp_64x1) TempX_64x1 = vqsub_s64(Zero_s64x1, Tmp_64x1); // -Tmp_64x1 TempX_64x1 = vbsl_s64(bsl_u64x1, TempX_64x1, Tmp_64x1); TempX_64x1 = vqrshl_s64(TempX_64x1, MaxShiftBits_hd_64x1); TempX_64x2 = vcombine_s64(TempX_64x1, TempX_64x1); diffX_64x1 = vqsub_s64(Peak_64x1, TempX_64x1); bsl_u64x1 = 
vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63)); // sign(diffX) diffX_64x2 = vcombine_s64(diffX_64x1, diffX_64x1); diffX_low_32x2 = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32); // wextract_l(diffX), wextract_l(diffX) diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32); // wround_L(diffX), wround_L(diffX) Tmp_64x2 = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2)); // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef) Tmp_64x2 = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2); // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef)) Tmp_64x2 = vqaddq_s64(TempX_64x2, Tmp_64x2); Peak_64x1 = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2)); Tmp_64x1 = vqsub_s64(Peak_64x1, PeakMax_64x1); bsl_u64x1 = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63)); // sign(Peak_64x1 - PeakMax_64x1) PeakMax_64x1 = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1); } } HeapPtr->LimiterPeak[ch] = vget_lane_s64(Peak_64x1, 0); // save history } // for(ch = 0...) 
PeakMax = vget_lane_s64(PeakMax_64x1, 0); HeapPtr->PrevShiftBits = MaxShiftBits; STOP_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_INSERT_NEW_SUBBAND) if(PeakMax < MDRC5B_ALMOST_ZERO_THRESH) { PeakdB = (MDRC5B_POWER_DB_MINUS_INF << 16); // 8.16, [-128.0, 127.0] dB } else { Peak_u32x2x2 = vuzp_u32(vreinterpret_u32_s64(PeakMax_64x1), vreinterpret_u32_s64(PeakMax_64x1)); bsl_u32x2 = vceq_u32(Peak_u32x2x2.val[1], vdup_n_u32(0)); Peak_exp_u32x2 = vadd_u32(vclz_u32(Peak_u32x2x2.val[0]), vdup_n_u32(32)); Peak_exp2_u32x2 = vclz_u32(Peak_u32x2x2.val[1]); Peak_exp_u32x2 = vbsl_u32(bsl_u32x2, Peak_exp_u32x2, Peak_exp2_u32x2); Peak_mant_u32x2 = vrshrn_n_u64(vshlq_u64(vreinterpretq_u64_s64(vcombine_s64(PeakMax_64x1, PeakMax_64x1)), vreinterpretq_s64_u64(vmovl_u32(Peak_exp_u32x2))), 32); // if(Peak_mant >= sqrt(0.5)) // { // Peak_exp--; // Peak_mant >>= 1; // } bsl_u32x2 = vcge_u32(Peak_mant_u32x2, vdup_n_u32(0xB504F334)); Peak_exp_u32x2 = vbsl_u32(bsl_u32x2, vsub_u32(Peak_exp_u32x2, vdup_n_u32(1)), Peak_exp_u32x2); Peak_mant_u32x2 = vbsl_u32(bsl_u32x2, vrshr_n_u32(Peak_mant_u32x2, 1), Peak_mant_u32x2); Peak_exp_32x2 = vreinterpret_s32_u32(Peak_exp_u32x2); #ifdef SAMPLES_24_BITS // correction of 16 bits if input samples are 24 bits Peak_exp_32x2 = vsub_s32(Peak_exp_32x2, vdup_n_s32(16)); #endif // SAMPLES_24_BITS // at this point : sqrt(0.5)/2 <= Peak_mant < sqrt(0.5) // // ln(1+x) = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 - x^10/10 ... accuracy OK if |x| < 0.5 // sqrt(0.5)/2 <= Peak_mant < sqrt(0.5) => sqrt(0.5)-1 <= 2*Peak_mant-1 < 2*sqrt(0.5)-1 // => ln(Peak_mant) = ln(1+x)-ln(2) with x=2*Peak_mant-1, i.e. |x| < 0.414214... 
// x=2*PeakMax_mant-1 in Q31 // => sqrt(0.5)-1 <= x < 2*sqrt(0.5)-1 x_32x2 = vreinterpret_s32_u32(vsub_u32(Peak_mant_u32x2, vdup_n_u32(0x80000000))); PeakdB_32x2 = x_32x2; // PeakdB = x xn_32x2 = vqrdmulh_s32(x_32x2, x_32x2); // xn = x^2 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 1)); // PeakdB = x - x^2/2 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^3 PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x2AAAAAAB))); // PeakdB = x - x^2/2 + x^3/3 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^4 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 2)); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^5 PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x1999999A))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^6 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x15555555))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^7 PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x12492492))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^8 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 3)); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^9 PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x0E38E38E))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 xn_32x2 = vqrdmulh_s32(xn_32x2, x_32x2); // xn = x^10 PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x0CCCCCCD))); // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 - x^10/10 // at this point : PeakMaxdB contains ln(1+x) in Q31 if(RmsMeasure) { // dB(power) = 10*log10(power) // PeakMaxdB = 
10*log10(PeakMax)+20*log10(2)*(HEADROOM+MaxShiftBits) // = 10*ln(PeakMax)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 10/ln(10)*ln(PeakMax_mant*2^(-PeakMax_exp))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 10/ln(10)*(ln(PeakMax_mant)-PeakMax_exp*ln(2))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 10/ln(10)*ln(PeakMax_mant)-PeakMax_exp*10*ln(2)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 10/ln(10)*ln(PeakMax_mant)+10*ln(2)/ln(10)*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp) // // => RmsdB = 10/ln(10)*ln(1+x)+10*ln(2)/ln(10)*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp) // => RmsdB (Q16) = 0x457CB*ln(1+x)+0x302A3*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp) // fractional multiply 0x457CB*ln(1+x) in Q16 PeakdB_32x2 = vqrdmulh_s32(PeakdB_32x2, vdup_n_s32(0x457CB)); // PeakdB_exp = 2*(HEADROOM+MaxShiftBits)-PeakdB_exp Peak_exp_32x2 = vsub_s32(vdup_n_s32(2 * (HEADROOM + MaxShiftBits)), Peak_exp_32x2); // PeakMaxdB final value (integer mac 0x302A3*PeakdB_exp) PeakdB_32x2 = vmla_s32(PeakdB_32x2, Peak_exp_32x2, vdup_n_s32(0x302A3)); } else { // dB(power) = 20*log10(abs) // PeakMaxdB = 20*log10(PeakMax)+20*log10(2)*(HEADROOM+MaxShiftBits) // = 20*ln(PeakMax)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 20/ln(10)*ln(PeakMax_mant*2^(-PeakMax_exp))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 20/ln(10)*(ln(PeakMax_mant)-PeakMax_exp*ln(2))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 20/ln(10)*ln(PeakMax_mant)-PeakMax_exp*20*ln(2)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits) // = 20/ln(10)*ln(PeakMax_mant)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits-PeakMax_exp) // // => RmsdB = 20/ln(10)*ln(1+x)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits-PeakMax_exp) // => RmsdB (Q16) = 0x8AF96*ln(1+x)+0x60546*(HEADROOM+MaxShiftBits-PeakMax_exp) // fractional multiply 0x8AF96*ln(1+x) in Q16 PeakdB_32x2 = vqrdmulh_s32(PeakdB_32x2, vdup_n_s32(0x8AF96)); // PeakdB_exp = HEADROOM+MaxShiftBits-PeakdB_exp Peak_exp_32x2 = vsub_s32(vdup_n_s32(HEADROOM + MaxShiftBits), Peak_exp_32x2); // PeakMaxdB final
value (integer mac 0x60546*PeakdB_exp) PeakdB_32x2 = vmla_s32(PeakdB_32x2, Peak_exp_32x2, vdup_n_s32(0x60546)); } PeakdB = vget_lane_s32(PeakdB_32x2, 0); } #ifdef DEBUG_LIMITER_OUTPUT if((debug_cpt_samples >= DEBUG_CPT_MIN) && (debug_cpt_samples <= DEBUG_CPT_MAX)) { char string[100]; debug_write_string("MRDC5B_LIMITER_PEAKMAX_PEAKDB\n"); sprintf(string, "PeakMax=0x%012llX, HEADROOM+MaxShiftBits=%d => PeakdB=0x%06X\n", #ifdef SAMPLES_24_BITS PeakMax & 0xFFFFFFFFFFFFLL, #else // SAMPLES_24_BITS (PeakMax >> 16) & 0xFFFFFFFFFFFFLL, #endif // SAMPLES_24_BITS HEADROOM + MaxShiftBits, PeakdB & 0xFFFFFF); debug_write_string(string); }