Exemplo n.º 1
1
// Fixed-point dot product of two interleaved complex int16 (I/Q) streams.
// Each partial product is arithmetically right-shifted by |output_shift|
// before accumulation (fixed-point scaling), and the packed 16-bit (Re, Im)
// result is returned in a single int32_t.
// Two SIMD paths: SSE (x86/x86_64) and NEON (32-bit ARM).
// NOTE(review): the parameter comment says N "must be a multiple of 8", yet
// both loops execute N>>2 iterations -- confirm the exact length/alignment
// contract against the callers.
int32_t dot_product(int16_t *x,
                    int16_t *y,
                    uint32_t N, //must be a multiple of 8
                    uint8_t output_shift)
{

  uint32_t n;

#if defined(__x86_64__) || defined(__i386__)
  __m128i *x128,*y128,mmtmp1,mmtmp2,mmtmp3,mmcumul,mmcumul_re,mmcumul_im;
  __m64 mmtmp7;
  // Sign pattern used with _mm_sign_epi16 to negate every other int16 lane
  // (multiplication by -i in the complex-conjugate trick below).
  __m128i minus_i = _mm_set_epi16(-1,1,-1,1,-1,1,-1,1);
  int32_t result;

  x128 = (__m128i*) x;
  y128 = (__m128i*) y;

  mmcumul_re = _mm_setzero_si128();
  mmcumul_im = _mm_setzero_si128();

  // Each __m128i holds 4 complex int16 samples (8 int16 lanes).
  for (n=0; n<(N>>2); n++) {

    //printf("n=%d, x128=%p, y128=%p\n",n,x128,y128);
    //    print_shorts("x",&x128[0]);
    //    print_shorts("y",&y128[0]);

    // this computes Re(z) = Re(x)*Re(y) + Im(x)*Im(y)
    mmtmp1 = _mm_madd_epi16(x128[0],y128[0]);
    //    print_ints("re",&mmtmp1);
    // mmtmp1 contains real part of 4 consecutive outputs (32-bit)

    // shift and accumulate results
    mmtmp1 = _mm_srai_epi32(mmtmp1,output_shift);
    mmcumul_re = _mm_add_epi32(mmcumul_re,mmtmp1);
    //    print_ints("re",&mmcumul_re);


    // this computes Im(z) = Re(x)*Im(y) - Re(y)*Im(x)
    // Swap Re/Im within each complex sample, then flip signs so the madd
    // below produces the imaginary part.
    mmtmp2 = _mm_shufflelo_epi16(y128[0],_MM_SHUFFLE(2,3,0,1));
    //    print_shorts("y",&mmtmp2);
    mmtmp2 = _mm_shufflehi_epi16(mmtmp2,_MM_SHUFFLE(2,3,0,1));
    //    print_shorts("y",&mmtmp2);
    mmtmp2 = _mm_sign_epi16(mmtmp2,minus_i);
    //        print_shorts("y",&mmtmp2);

    mmtmp3 = _mm_madd_epi16(x128[0],mmtmp2);
    //        print_ints("im",&mmtmp3);
    // mmtmp3 contains imag part of 4 consecutive outputs (32-bit)

    // shift and accumulate results
    mmtmp3 = _mm_srai_epi32(mmtmp3,output_shift);
    mmcumul_im = _mm_add_epi32(mmcumul_im,mmtmp3);
    //    print_ints("im",&mmcumul_im);

    x128++;
    y128++;
  }

  // this gives Re Re Im Im
  mmcumul = _mm_hadd_epi32(mmcumul_re,mmcumul_im);
  //  print_ints("cumul1",&mmcumul);

  // this gives Re Im Re Im
  mmcumul = _mm_hadd_epi32(mmcumul,mmcumul);

  //  print_ints("cumul2",&mmcumul);


  //mmcumul = _mm_srai_epi32(mmcumul,output_shift);
  // extract the lower half
  mmtmp7 = _mm_movepi64_pi64(mmcumul);
  //  print_ints("mmtmp7",&mmtmp7);
  // pack the result
  mmtmp7 = _mm_packs_pi32(mmtmp7,mmtmp7);
  //  print_shorts("mmtmp7",&mmtmp7);
  // convert back to integer
  result = _mm_cvtsi64_si32(mmtmp7);

  // MMX state cleanup required after using __m64 registers.
  _mm_empty();
  _m_empty();

  return(result);

#elif defined(__arm__)
  int16x4_t *x_128=(int16x4_t*)x;
  int16x4_t *y_128=(int16x4_t*)y;
  int32x4_t tmp_re,tmp_im;
  int32x4_t tmp_re1,tmp_im1;
  int32x4_t re_cumul,im_cumul;
  int32x2_t re_cumul2,im_cumul2;
  // Negative shift amount: vqshlq_s32 with a negative count is a right shift.
  int32x4_t shift = vdupq_n_s32(-output_shift); 
  int32x2x2_t result2;
  int16_t conjug[4]__attribute__((aligned(16))) = {-1,1,-1,1} ;

  re_cumul = vdupq_n_s32(0);
  im_cumul = vdupq_n_s32(0); 

  for (n=0; n<(N>>2); n++) {

    tmp_re  = vmull_s16(*x_128++, *y_128++);
    //tmp_re = [Re(x[0])Re(y[0]) Im(x[0])Im(y[0]) Re(x[1])Re(y[1]) Im(x[1])Im(y[1])] 
    tmp_re1 = vmull_s16(*x_128++, *y_128++);
    //tmp_re1 = [Re(x1[1])Re(x2[1]) Im(x1[1])Im(x2[1]) Re(x1[1])Re(x2[2]) Im(x1[1])Im(x2[2])] 
    tmp_re  = vcombine_s32(vpadd_s32(vget_low_s32(tmp_re),vget_high_s32(tmp_re)),
                           vpadd_s32(vget_low_s32(tmp_re1),vget_high_s32(tmp_re1)));
    //tmp_re = [Re(ch[0])Re(rx[0])+Im(ch[0])Im(ch[0]) Re(ch[1])Re(rx[1])+Im(ch[1])Im(ch[1]) Re(ch[2])Re(rx[2])+Im(ch[2]) Im(ch[2]) Re(ch[3])Re(rx[3])+Im(ch[3])Im(ch[3])] 

    // NOTE(review): x_128/y_128 were already advanced by the two loads above,
    // so the imaginary-part computation below consumes the NEXT two vectors,
    // i.e. re and im are taken from different samples.  Verify this against
    // the reference (x86) path, which reuses the same x128[0]/y128[0].
    tmp_im  = vmull_s16(vrev32_s16(vmul_s16(*x_128++,*(int16x4_t*)conjug)),*y_128++);
    //tmp_im = [-Im(ch[0])Re(rx[0]) Re(ch[0])Im(rx[0]) -Im(ch[1])Re(rx[1]) Re(ch[1])Im(rx[1])]
    tmp_im1 = vmull_s16(vrev32_s16(vmul_s16(*x_128++,*(int16x4_t*)conjug)),*y_128++);
    //tmp_im1 = [-Im(ch[2])Re(rx[2]) Re(ch[2])Im(rx[2]) -Im(ch[3])Re(rx[3]) Re(ch[3])Im(rx[3])]
    tmp_im  = vcombine_s32(vpadd_s32(vget_low_s32(tmp_im),vget_high_s32(tmp_im)),
                           vpadd_s32(vget_low_s32(tmp_im1),vget_high_s32(tmp_im1)));
    //tmp_im = [-Im(ch[0])Re(rx[0])+Re(ch[0])Im(rx[0]) -Im(ch[1])Re(rx[1])+Re(ch[1])Im(rx[1]) -Im(ch[2])Re(rx[2])+Re(ch[2])Im(rx[2]) -Im(ch[3])Re(rx[3])+Re(ch[3])Im(rx[3])]

    // Saturating shift-then-accumulate of the four partial sums per lane.
    re_cumul = vqaddq_s32(re_cumul,vqshlq_s32(tmp_re,shift));
    im_cumul = vqaddq_s32(im_cumul,vqshlq_s32(tmp_im,shift));
  }
  
  // Horizontal reduction of the 4 lanes, then interleave re/im and return
  // the packed pair from lane 0.
  re_cumul2 = vpadd_s32(vget_low_s32(re_cumul),vget_high_s32(re_cumul));
  im_cumul2 = vpadd_s32(vget_low_s32(im_cumul),vget_high_s32(im_cumul));
  re_cumul2 = vpadd_s32(re_cumul2,re_cumul2);
  im_cumul2 = vpadd_s32(im_cumul2,im_cumul2);
  result2   = vzip_s32(re_cumul2,im_cumul2);
  return(vget_lane_s32(result2.val[0],0));
#endif
  // NOTE(review): if neither architecture macro is defined this non-void
  // function falls off the end, which is undefined behavior when the
  // (nonexistent) return value is used.
}
Exemplo n.º 2
0
// Compiler intrinsics-coverage testcase (GCC ARM NEON testsuite style):
// checks that vget_lane_s32 with lane index 1 compiles and selects the
// expected instruction.  The argument is deliberately left uninitialized
// and the result deliberately unused -- only code generation matters here.
void test_vget_lanes32 (void)
{
  int32_t out_int32_t;
  int32x2_t arg0_int32x2_t;

  out_int32_t = vget_lane_s32 (arg0_int32x2_t, 1);
}
// Sum the eight signed 16-bit lanes of |v_16x8| into a single int.
// Widening pairwise adds (16->32->64 bits) keep the intermediate sums from
// overflowing; the two 64-bit halves are then folded with a 32-bit add and
// the total is read from lane 0.
static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
  const int64x2_t pair_sums = vpaddlq_s32(vpaddlq_s16(v_16x8));
  const int32x2_t lo = vreinterpret_s32_s64(vget_low_s64(pair_sums));
  const int32x2_t hi = vreinterpret_s32_s64(vget_high_s64(pair_sums));
  return vget_lane_s32(vadd_s32(lo, hi), 0);
}
Exemplo n.º 4
0
// ref, src = [0, 510] - max diff = 16-bits
// bwl = {2, 3, 4}, width = {16, 32, 64}
// Returns the variance proxy: sum(diff^2) - sum(diff)^2 >> (bwl + 2).
int vp9_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) {
  int32x4_t sum_sq = vdupq_n_s32(0);   // running sum of squared differences
  int16x8_t sum = vdupq_n_s16(0);      // running sum of differences
  int remaining = 4 << bwl;

  assert(remaining >= 8);
  assert((remaining % 8) == 0);

  // Process 8 lanes per iteration; diff fits in 10 bits ([-510, 510]).
  do {
    const int16x8_t d = vsubq_s16(vld1q_s16(ref), vld1q_s16(src));
    const int16x4_t d_lo = vget_low_s16(d);
    const int16x4_t d_hi = vget_high_s16(d);
    sum_sq = vmlal_s16(sum_sq, d_lo, d_lo);  // dynamic range 26 bits.
    sum_sq = vmlal_s16(sum_sq, d_hi, d_hi);
    sum = vaddq_s16(sum, d);                 // dynamic range 16 bits.

    ref += 8;
    src += 8;
    remaining -= 8;
  } while (remaining != 0);

  {
    // Note: 'sum''s pairwise addition could be implemented similarly to
    // horizontal_add_u16x8(), but one less vpaddl with 'sum' when paired
    // with the summation of 'sum_sq' performed better on a Cortex-A15.
    const int32x4_t t0 = vpaddlq_s16(sum);   // cascading summation of 'sum'
    const int32x2_t t1 = vadd_s32(vget_low_s32(t0), vget_high_s32(t0));
    const int t = vget_lane_s32(vpadd_s32(t1, t1), 0);
    const int64x2_t s0 = vpaddlq_s32(sum_sq);  // cascading summation of 'sum_sq'
    const int32x2_t s1 = vadd_s32(vreinterpret_s32_s64(vget_low_s64(s0)),
                                  vreinterpret_s32_s64(vget_high_s64(s0)));
    const int s = vget_lane_s32(s1, 0);
    const int shift_factor = bwl + 2;
    return s - ((t * t) >> shift_factor);
  }
}
Exemplo n.º 5
0
// Choose between the two predictor candidates *c0 and *c1 by comparing how
// close each is to *c2, channel by channel.  Returns *c0 when its summed
// per-byte distance to *c2 is no larger than *c1's, otherwise *c1.
static WEBP_INLINE uint32_t Select(const uint32_t* const c0,
                                   const uint32_t* const c1,
                                   const uint32_t* const c2) {
  // Place each packed 32-bit pixel in the low half of a 64-bit lane.
  const uint8x8_t a = vreinterpret_u8_u64(vcreate_u64(*c0));
  const uint8x8_t b = vreinterpret_u8_u64(vcreate_u64(*c1));
  const uint8x8_t c = vreinterpret_u8_u64(vcreate_u64(*c2));
  // Per-byte absolute differences |b-c| and |a-c|, widened pairwise so the
  // signed subtraction below cannot overflow.
  const int16x4_t sum_bc = vreinterpret_s16_u16(vpaddl_u8(vabd_u8(b, c)));
  const int16x4_t sum_ac = vreinterpret_s16_u16(vpaddl_u8(vabd_u8(a, c)));
  const int32x2_t diff = vpaddl_s16(vsub_s16(sum_bc, sum_ac));
  const int32_t pa_minus_pb = vget_lane_s32(diff, 0);
  return (pa_minus_pb <= 0) ? *c0 : *c1;
}
Exemplo n.º 6
0
File: avg_neon.c — Project: negge/aom
// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
// Sum of absolute transformed differences: returns sum(|coeff[i]|).
int aom_satd_neon(const int16_t *coeff, int length) {
  const int16x4_t zeros = vdup_n_s16(0);
  int32x4_t sum = vdupq_n_s32(0);

  // Accumulate |coeff[i] - 0| sixteen values per iteration via vabal.
  do {
    const int16x8_t c_lo = vld1q_s16(coeff);
    const int16x8_t c_hi = vld1q_s16(coeff + 8);
    sum = vabal_s16(sum, vget_low_s16(c_lo), zeros);
    sum = vabal_s16(sum, vget_high_s16(c_lo), zeros);
    sum = vabal_s16(sum, vget_low_s16(c_hi), zeros);
    sum = vabal_s16(sum, vget_high_s16(c_hi), zeros);
    coeff += 16;
    length -= 16;
  } while (length != 0);

  {
    // satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024].
    // Widen to 64 bits, fold the two halves, read the total from lane 0.
    const int64x2_t widened = vpaddlq_s32(sum);
    const int32x2_t folded =
        vadd_s32(vreinterpret_s32_s64(vget_low_s64(widened)),
                 vreinterpret_s32_s64(vget_high_s64(widened)));
    return vget_lane_s32(folded, 0);
  }
}
Exemplo n.º 7
0
// Compiler codegen testcase (LLVM/FileCheck style): the CHECK comments are
// functional test directives and must match the emitted assembly.  Verifies
// that returning lane 1 of an int32x2_t as int64_t produces a sign-extending
// smov.  Do not edit the CHECK lines.
int64_t test_vget_lane_s32(int32x2_t v1) {
  // CHECK: test_vget_lane_s32
  return vget_lane_s32(v1, 1);
  // CHECK: smov {{x[0-9]+}}, {{v[0-9]+}}.s[1]
}
Exemplo n.º 8
0
// NEON temporal denoiser for one 16x16 luma macroblock (VP8).
// Blends |sig| toward the motion-compensated running average, with a
// per-pixel adjustment magnitude selected by which threshold "level" the
// absolute difference falls into, and accumulates the total signed
// adjustment.  If that total is too large, a weaker delta-capped second
// pass is attempted before giving up and returning COPY_BLOCK.
// NOTE(review): this captured snippet is truncated -- the function's final
// return statement and closing brace are missing below; confirm against
// the upstream vp8 denoising_neon.c.
int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y,
                             int mc_running_avg_y_stride,
                             unsigned char *running_avg_y,
                             int running_avg_y_stride,
                             unsigned char *sig, int sig_stride,
                             unsigned int motion_magnitude,
                             int increase_denoising) {
    /* If motion_magnitude is small, making the denoiser more aggressive by
     * increasing the adjustment for each level, level1 adjustment is
     * increased, the deltas stay the same.
     */
    int shift_inc  = (increase_denoising &&
        motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0;
    const uint8x16_t v_level1_adjustment = vmovq_n_u8(
        (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
    const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
    const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
    const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
    const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
    const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
    int64x2_t v_sum_diff_total = vdupq_n_s64(0);

    /* Go over lines. */
    int r;
    for (r = 0; r < 16; ++r) {
        /* Load inputs. */
        const uint8x16_t v_sig = vld1q_u8(sig);
        const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

        /* Calculate absolute difference and sign masks. */
        const uint8x16_t v_abs_diff      = vabdq_u8(v_sig, v_mc_running_avg_y);
        const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
        const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);

        /* Figure out which level that put us in. */
        const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold,
                                                  v_abs_diff);
        const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold,
                                                  v_abs_diff);
        const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold,
                                                  v_abs_diff);

        /* Calculate absolute adjustments for level 1, 2 and 3. */
        const uint8x16_t v_level2_adjustment = vandq_u8(v_level2_mask,
                                                        v_delta_level_1_and_2);
        const uint8x16_t v_level3_adjustment = vandq_u8(v_level3_mask,
                                                        v_delta_level_2_and_3);
        const uint8x16_t v_level1and2_adjustment = vaddq_u8(v_level1_adjustment,
            v_level2_adjustment);
        const uint8x16_t v_level1and2and3_adjustment = vaddq_u8(
            v_level1and2_adjustment, v_level3_adjustment);

        /* Figure adjustment absolute value by selecting between the absolute
         * difference if in level0 or the value for level 1, 2 and 3.
         */
        const uint8x16_t v_abs_adjustment = vbslq_u8(v_level1_mask,
            v_level1and2and3_adjustment, v_abs_diff);

        /* Calculate positive and negative adjustments. Apply them to the signal
         * and accumulate them. Adjustments are less than eight and the maximum
         * sum of them (7 * 16) can fit in a signed char.
         */
        const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask,
                                                     v_abs_adjustment);
        const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask,
                                                     v_abs_adjustment);

        uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
        v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);

        /* Store results. */
        vst1q_u8(running_avg_y, v_running_avg_y);

        /* Sum all the accumulators to have the sum of all pixel differences
         * for this macroblock.
         */
        {
            const int8x16_t v_sum_diff =
                vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
                          vreinterpretq_s8_u8(v_neg_adjustment));

            const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);

            const int32x4_t fedc_ba98_7654_3210 =
                vpaddlq_s16(fe_dc_ba_98_76_54_32_10);

            const int64x2_t fedcba98_76543210 =
                vpaddlq_s32(fedc_ba98_7654_3210);

            v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
        }

        /* Update pointers for next iteration. */
        sig += sig_stride;
        mc_running_avg_y += mc_running_avg_y_stride;
        running_avg_y += running_avg_y_stride;
    }

    /* Too much adjustments => copy block. */
    {
        int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                                      vget_low_s64(v_sum_diff_total));
        /* NOTE(review): the 64-bit total is reinterpreted and only the low
         * 32 bits are read here -- presumably safe given the bounded per-row
         * adjustment sums above; confirm against upstream. */
        int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
        int sum_diff_thresh = SUM_DIFF_THRESHOLD;

        if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
        if (sum_diff > sum_diff_thresh) {
          // Before returning to copy the block (i.e., apply no denoising),
          // checK if we can still apply some (weaker) temporal filtering to
          // this block, that would otherwise not be denoised at all. Simplest
          // is to apply an additional adjustment to running_avg_y to bring it
          // closer to sig. The adjustment is capped by a maximum delta, and
          // chosen such that in most cases the resulting sum_diff will be
          // within the accceptable range given by sum_diff_thresh.

          // The delta is set by the excess of absolute pixel diff over the
          // threshold.
          int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
          // Only apply the adjustment for max delta up to 3.
          if (delta < 4) {
            const uint8x16_t k_delta = vmovq_n_u8(delta);
            // Rewind the three pointers to the top of the macroblock for
            // the second (delta-capped) pass.
            sig -= sig_stride * 16;
            mc_running_avg_y -= mc_running_avg_y_stride * 16;
            running_avg_y -= running_avg_y_stride * 16;
            for (r = 0; r < 16; ++r) {
              uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
              const uint8x16_t v_sig = vld1q_u8(sig);
              const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

              /* Calculate absolute difference and sign masks. */
              const uint8x16_t v_abs_diff      = vabdq_u8(v_sig,
                                                          v_mc_running_avg_y);
              const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig,
                                                          v_mc_running_avg_y);
              const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig,
                                                          v_mc_running_avg_y);
              // Clamp absolute difference to delta to get the adjustment.
              const uint8x16_t v_abs_adjustment =
                  vminq_u8(v_abs_diff, (k_delta));

              const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask,
                                                           v_abs_adjustment);
              const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask,
                                                           v_abs_adjustment);

              /* Note: adjustments are applied in the opposite direction of
               * the first pass, pulling running_avg_y back toward sig. */
              v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
              v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);

              /* Store results. */
              vst1q_u8(running_avg_y, v_running_avg_y);

              {
                  const int8x16_t v_sum_diff =
                      vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
                                vreinterpretq_s8_u8(v_pos_adjustment));

                  const int16x8_t fe_dc_ba_98_76_54_32_10 =
                      vpaddlq_s8(v_sum_diff);
                  const int32x4_t fedc_ba98_7654_3210 =
                      vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
                  const int64x2_t fedcba98_76543210 =
                      vpaddlq_s32(fedc_ba98_7654_3210);

                  v_sum_diff_total = vqaddq_s64(v_sum_diff_total,
                                                fedcba98_76543210);
              }
              /* Update pointers for next iteration. */
              sig += sig_stride;
              mc_running_avg_y += mc_running_avg_y_stride;
              running_avg_y += running_avg_y_stride;
            }
            {
              // Update the sum of all pixel differences of this MB.
              x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                            vget_low_s64(v_sum_diff_total));
              sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);

              if (sum_diff > sum_diff_thresh) {
                return COPY_BLOCK;
              }
            }
          } else {
            return COPY_BLOCK;
          }
        }
    }
    /* NOTE(review): source is cut off here -- the success-path return and
     * the function's closing brace are missing from this capture. */
Exemplo n.º 9
0
int32_t test_vget_lane_s32(int32x2_t a) {
  // CHECK-LABEL: test_vget_lane_s32:
  // CHECK-NEXT:  mov.s  w0, v0[1]
  // CHECK-NEXT:  ret
  return vget_lane_s32(a, 1);
}
void mdrc5b_apply_limiter(MDRC5B_LOCAL_STRUCT_T *HeapPtr)
{
    unsigned int LaIdx;
    unsigned int NumMainCh;
    unsigned int Samples;
    unsigned int ch, k, n;
    MMlong       *Ptr;
    MMlong       *Ptr2;

    MMlong       *MemOutPtr;
    MMshort      PeakdB;
    MMlong       PeakMax;
    int          RmsMeasure;
    MMshort      LimiterAtCoef;
    MMshort      LimiterReCoef;
    MMshort      LimiterGainMant[MDRC5B_BLOCK_SIZE + 1];
    MMshort      LimiterGainExp;
    MMshort      LimiterTargetGaindB;
    unsigned int LimiterHoldRem;
    unsigned int LimiterHtSamp;
    MMshort      Exp, TargetGain;
    MMshort      MaxShiftBits;
    unsigned int lookahead_len = (unsigned int) HeapPtr->LimiterLALen;
    unsigned int cpt1, cpt2;
    uint32x2x2_t Temp_u32x2x2;
    uint32x2_t   Ldbits_u32x2, Ldbits2_u32x2;
    uint32x2_t   bsl_u32x2;
    int32x2_t    LimGainMant_32x2;
    int64x2_t    TempX_64x2, MemOut_64x2;
    int64x2_t    Tmp_64x2;
    int64x2_t    LimiterGainExp_64x2, sample_64x2;
    int64x1_t    TempX_64x1, sample_64x1;
    int32_t      *LimiterGainMant_ptr;
    int32x2_t    Tmp_32x2, Ldbits_32x2, n_32x2;
    int32x2_t    TempX_low_32x2, TempX_high_32x2;
    int32x2x2_t  Tmp_32x2x2;
    int64x1_t    Peak_64x1, PeakMax_64x1, Tmp_64x1, diffX_64x1;
    int64x1_t    Peak_scale_pow_64x1, Peak_scale_64x1, Zero_s64x1;
    int64x1_t    MaxShiftBits_neg_64x1, MaxShiftBits_hd_64x1;
    int64x2_t    diffX_64x2;
    uint64x1_t   bsl_u64x1;
    int32x2_t    LimiterPeakCoef_32x2, diffX_low_32x2, diffX_high_32x2;
    int32x2_t    TargetGain_32x2;
    uint32x2x2_t Peak_u32x2x2;
    uint32x2_t   Peak_exp_u32x2, Peak_exp2_u32x2, Peak_mant_u32x2;
    int32x2_t    x_32x2, xn_32x2, PeakdB_32x2, Peak_exp_32x2;
    int32x2_t    LimiterTargetGaindB_32x2, Exp_32x2, LimiterCoef_32x2;
    int32x4_t    Tmp_32x4;


    START_PMU_MEASURE(PMU_MEASURE_MRDC5B_APPLY_LIMITER)

    START_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT)

    Samples   = (unsigned int) HeapPtr->BlockSize;
    NumMainCh = (unsigned int) HeapPtr->NumMainCh;

    TempX_64x2 = vdupq_n_s64(0);
    for(ch = 0; ch < NumMainCh; ch++)
    {
        Ptr = HeapPtr->MainInBuf[ch];
        // compute the number of bits needs to be shifted to avoid overflow
        for(k = (Samples >> 1); k > 0; k--)
        {
            sample_64x2 = vld1q_s64(Ptr);
            Ptr        +=2;
            sample_64x2 = veorq_s64(sample_64x2, vshrq_n_s64(sample_64x2, 63));
            TempX_64x2  = vorrq_s64(TempX_64x2, sample_64x2);
        }
        if(Samples & 1)
        {
            sample_64x1 = vld1_s64(Ptr);
            sample_64x1 = veor_s64(sample_64x1, vshr_n_s64(sample_64x1, 63));
            TempX_64x2  = vorrq_s64(TempX_64x2, vcombine_s64(sample_64x1, sample_64x1));
        }
    }
    TempX_64x1    = vorr_s64(vget_low_s64(TempX_64x2), vget_high_s64(TempX_64x2));
    Temp_u32x2x2  = vuzp_u32(vreinterpret_u32_s64(TempX_64x1), vreinterpret_u32_s64(TempX_64x1));
    bsl_u32x2     = vceq_u32(Temp_u32x2x2.val[1], vdup_n_u32(0));                  // MSB == 0 ?
    // use clz instead of cls because we are sure that input value is positive
    // and because cls(LSB) could be wrong (if MSB is equal to 0 and bit 31 of LSL is 1)
    // thus clz result will be 1 more than cls result (that's why you may see (Ldbits - 1)
    // instead of Ldbits below)
    Ldbits_u32x2  = vadd_u32(vclz_u32(Temp_u32x2x2.val[0]), vdup_n_u32(32));       // clz(LSB)+32
    Ldbits2_u32x2 = vclz_u32(Temp_u32x2x2.val[1]);                                 // clz(MSB)
    Ldbits_u32x2  = vbsl_u32(bsl_u32x2, Ldbits_u32x2, Ldbits2_u32x2);              // MSB == 0 ? clz(LSB)+32 : clz(MSB)
    bsl_u32x2     = vceq_u32(Ldbits_u32x2, vdup_n_u32(64));                        // Ldbits == 64 ? (i.e. TempX == 0 ?)
    // the aim of MaxShiftBits is that sample will be shifted so that it occupies
    // 24 significant bits for 24 bits samples or 32 significant bits for 32 bits samples
    // but we are in 64 bits architecture on CA9/NEON
    // so we must right shift of ((64 - 24) - (Ldbits - 1)) bits for 24 bits samples
    // or of ((64 - 32) - (Ldbits - 1)) bits for 32 bits samples
    // and we add 1 because it was done this way on MMDSP (I don't know why !)
#ifdef SAMPLES_24_BITS
    // MaxShiftBits = ((64 - 24) - (Ldbits - 1)) + 1
    //              = 42 - Ldbits
    Ldbits_32x2     = vsub_s32(vdup_n_s32(42), vreinterpret_s32_u32(Ldbits_u32x2));
#else // SAMPLES_24_BITS
    // MaxShiftBits = ((64 - 32) - (Ldbits - 1)) + 1
    //              = 34 - Ldbits
    Ldbits_32x2     = vsub_s32(vdup_n_s32(34), vreinterpret_s32_u32(Ldbits_u32x2));
#endif // SAMPLES_24_BITS
    Ldbits_32x2     = vmax_s32(vdup_n_s32(1), Ldbits_32x2);
    Ldbits_32x2     = vbsl_s32(bsl_u32x2, vdup_n_s32(1), Ldbits_32x2);              // if(TempX == 0) Ldbits = 1
    MaxShiftBits    = vget_lane_s32(Ldbits_32x2, 0);

    STOP_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT)
#ifdef DEBUG_LIMITER_OUTPUT
    if((debug_cpt_samples >= DEBUG_CPT_MIN) && (debug_cpt_samples <= DEBUG_CPT_MAX))
    {
        char string[100];

        debug_write_string("MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT\n");
        sprintf(string, "MaxShiftBits=%d\n", MaxShiftBits);
        debug_write_string(string);
    }
#endif  // DEBUG_LIMITER_OUTPUT


    START_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_INSERT_NEW_SUBBAND)

    // insert the new subband samples into the lookahead buffers
    RmsMeasure = HeapPtr->Limiter.RmsMeasure;

    LaIdx = (unsigned int) HeapPtr->LimiterLaIdx;
    if(LaIdx + Samples >= lookahead_len)
    {
        cpt1                  = lookahead_len - LaIdx;
        cpt2                  = Samples - cpt1;
        // update index
        HeapPtr->LimiterLaIdx = (int) cpt2;
    }
    else
    {
        cpt1                  = Samples;
        cpt2                  = 0;
        // update index
        HeapPtr->LimiterLaIdx = (int) (LaIdx + Samples);
    }

    LimiterPeakCoef_32x2  = vdup_n_s32(HeapPtr->LimiterPeakAtCoef);                               // LimiterPeakAtCoef, LimiterPeakAtCoef
    LimiterPeakCoef_32x2  = vset_lane_s32(HeapPtr->LimiterPeakReCoef, LimiterPeakCoef_32x2, 1);   // LimiterPeakAtCoef, LimiterPeakReCoef
    Peak_scale_64x1       = vdup_n_s64(HeapPtr->PrevShiftBits - MaxShiftBits);
    Peak_scale_pow_64x1   = vshl_n_s64(Peak_scale_64x1, 1);
    MaxShiftBits_neg_64x1 = vdup_n_s64(-MaxShiftBits);
#ifdef SAMPLES_24_BITS
    MaxShiftBits_hd_64x1  = vdup_n_s64(24 - MaxShiftBits);
#else // SAMPLES_24_BITS
    MaxShiftBits_hd_64x1  = vdup_n_s64(32 - MaxShiftBits);
#endif // SAMPLES_24_BITS
    PeakMax_64x1          = vdup_n_s64(0);

    for(ch = 0; ch < NumMainCh; ch++)
    {
        Ptr  = HeapPtr->MainInBuf[ch];
        Ptr2 = HeapPtr->LimiterLABuf[ch] + LaIdx;  // go to the first valid sample

        Peak_64x1 = vdup_n_s64(HeapPtr->LimiterPeak[ch]);
        if(RmsMeasure)
        {
            // compensate Peak according to the previous shift bits
            Peak_64x1 = vqrshl_s64(Peak_64x1, Peak_scale_pow_64x1);                                 // neg value => shift right rounding

            // rms measure
            for(k = cpt1; k > 0; k--)
            {
                Tmp_64x1        = vld1_s64(Ptr);
                Ptr++;
                vst1_s64(Ptr2, Tmp_64x1);
                Ptr2++;
                Tmp_64x1        = vqrshl_s64(Tmp_64x1, MaxShiftBits_neg_64x1);
                Tmp_64x2        = vcombine_s64(Tmp_64x1, Tmp_64x1);
                Tmp_32x2x2      = vuzp_s32(vget_low_s32(vreinterpretq_s32_s64(Tmp_64x2)), vget_high_s32(vreinterpretq_s32_s64(Tmp_64x2)));
                Tmp_32x2        = Tmp_32x2x2.val[0];                                                // LSB of Tmp_64x2 (MSB is dummy)
                TempX_64x2      = vqdmull_s32(Tmp_32x2, Tmp_32x2);
                TempX_64x1      = vget_low_s64(TempX_64x2);
                diffX_64x1      = vqsub_s64(Peak_64x1, TempX_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63));                 // sign(diffX)
                diffX_64x2      = vcombine_s64(diffX_64x1, diffX_64x1);
                diffX_low_32x2  = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32);                     // wextract_l(diffX), wextract_l(diffX)
                diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32);                                     // wround_L(diffX), wround_L(diffX)
                Tmp_64x2        = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2));    // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef)
                Tmp_64x2        = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2);     // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef))
                Tmp_64x2        = vqaddq_s64(TempX_64x2, Tmp_64x2);
                Peak_64x1       = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2));
                Tmp_64x1        = vqsub_s64(Peak_64x1, PeakMax_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Peak_64x1 - PeakMax_64x1)
                PeakMax_64x1    = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1);
            }
            Ptr2 = HeapPtr->LimiterLABuf[ch];
            for(k = cpt2; k > 0; k--)
            {
                Tmp_64x1        = vld1_s64(Ptr);
                Ptr++;
                vst1_s64(Ptr2, Tmp_64x1);
                Ptr2++;
                Tmp_64x1        = vqrshl_s64(Tmp_64x1, MaxShiftBits_neg_64x1);
                Tmp_64x2        = vcombine_s64(Tmp_64x1, Tmp_64x1);
                Tmp_32x2x2      = vuzp_s32(vget_low_s32(vreinterpretq_s32_s64(Tmp_64x2)), vget_high_s32(vreinterpretq_s32_s64(Tmp_64x2)));
                Tmp_32x2        = Tmp_32x2x2.val[0];                                                // LSB of Tmp_64x2 (MSB is dummy)
                TempX_64x2      = vqdmull_s32(Tmp_32x2, Tmp_32x2);
                TempX_64x1      = vget_low_s64(TempX_64x2);
                diffX_64x1      = vqsub_s64(Peak_64x1, TempX_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63));                 // sign(diffX)
                diffX_64x2      = vcombine_s64(diffX_64x1, diffX_64x1);
                diffX_low_32x2  = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32);                     // wextract_l(diffX), wextract_l(diffX)
                diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32);                                     // wround_L(diffX), wround_L(diffX)
                Tmp_64x2        = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2));    // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef)
                Tmp_64x2        = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2);     // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef))
                Tmp_64x2        = vqaddq_s64(TempX_64x2, Tmp_64x2);
                Peak_64x1       = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2));
                Tmp_64x1        = vqsub_s64(Peak_64x1, PeakMax_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Peak_64x1 - PeakMax_64x1)
                PeakMax_64x1    = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1);
            }
        }
        else
        {
            // compensate Peak according to the previous shift bits
            Peak_64x1  = vqrshl_s64(Peak_64x1, Peak_scale_64x1);

            // amplitude measure
            Zero_s64x1 = vdup_n_s64(0);
            for(k = cpt1; k > 0; k--)
            {
                Tmp_64x1        = vld1_s64(Ptr);
                Ptr++;
                vst1_s64(Ptr2, Tmp_64x1);
                Ptr2++;
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Tmp_64x1)
                TempX_64x1      = vqsub_s64(Zero_s64x1, Tmp_64x1);                                  // -Tmp_64x1
                TempX_64x1      = vbsl_s64(bsl_u64x1, TempX_64x1, Tmp_64x1);
                TempX_64x1      = vqrshl_s64(TempX_64x1, MaxShiftBits_hd_64x1);
                TempX_64x2      = vcombine_s64(TempX_64x1, TempX_64x1);
                diffX_64x1      = vqsub_s64(Peak_64x1, TempX_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63));                 // sign(diffX)
                diffX_64x2      = vcombine_s64(diffX_64x1, diffX_64x1);
                diffX_low_32x2  = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32);                     // wextract_l(diffX), wextract_l(diffX)
                diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32);                                     // wround_L(diffX), wround_L(diffX)
                Tmp_64x2        = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2));    // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef)
                Tmp_64x2        = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2);     // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef))
                Tmp_64x2        = vqaddq_s64(TempX_64x2, Tmp_64x2);
                Peak_64x1       = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2));
                Tmp_64x1        = vqsub_s64(Peak_64x1, PeakMax_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Peak_64x1 - PeakMax_64x1)
                PeakMax_64x1    = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1);
            }
            Ptr2 = HeapPtr->LimiterLABuf[ch];
            for(k = cpt2; k > 0; k--)
            {
                Tmp_64x1        = vld1_s64(Ptr);
                Ptr++;
                vst1_s64(Ptr2, Tmp_64x1);
                Ptr2++;
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Tmp_64x1)
                TempX_64x1      = vqsub_s64(Zero_s64x1, Tmp_64x1);                                  // -Tmp_64x1
                TempX_64x1      = vbsl_s64(bsl_u64x1, TempX_64x1, Tmp_64x1);
                TempX_64x1      = vqrshl_s64(TempX_64x1, MaxShiftBits_hd_64x1);
                TempX_64x2      = vcombine_s64(TempX_64x1, TempX_64x1);
                diffX_64x1      = vqsub_s64(Peak_64x1, TempX_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63));                 // sign(diffX)
                diffX_64x2      = vcombine_s64(diffX_64x1, diffX_64x1);
                diffX_low_32x2  = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32);                     // wextract_l(diffX), wextract_l(diffX)
                diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32);                                     // wround_L(diffX), wround_L(diffX)
                Tmp_64x2        = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2));    // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef)
                Tmp_64x2        = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2);     // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef))
                Tmp_64x2        = vqaddq_s64(TempX_64x2, Tmp_64x2);
                Peak_64x1       = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2));
                Tmp_64x1        = vqsub_s64(Peak_64x1, PeakMax_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Peak_64x1 - PeakMax_64x1)
                PeakMax_64x1    = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1);
            }
        }

        HeapPtr->LimiterPeak[ch] = vget_lane_s64(Peak_64x1, 0);                                     // save history
    }  // for(ch = 0...)
    PeakMax                = vget_lane_s64(PeakMax_64x1, 0);
    HeapPtr->PrevShiftBits = MaxShiftBits;

    STOP_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_INSERT_NEW_SUBBAND)


    if(PeakMax < MDRC5B_ALMOST_ZERO_THRESH)
    {
        PeakdB = (MDRC5B_POWER_DB_MINUS_INF << 16); // 8.16, [-128.0, 127.0] dB
    }
    else
    {
        Peak_u32x2x2    = vuzp_u32(vreinterpret_u32_s64(PeakMax_64x1), vreinterpret_u32_s64(PeakMax_64x1));
        bsl_u32x2       = vceq_u32(Peak_u32x2x2.val[1], vdup_n_u32(0));
        Peak_exp_u32x2  = vadd_u32(vclz_u32(Peak_u32x2x2.val[0]), vdup_n_u32(32));
        Peak_exp2_u32x2 = vclz_u32(Peak_u32x2x2.val[1]);
        Peak_exp_u32x2  = vbsl_u32(bsl_u32x2, Peak_exp_u32x2, Peak_exp2_u32x2);
        Peak_mant_u32x2 = vrshrn_n_u64(vshlq_u64(vreinterpretq_u64_s64(vcombine_s64(PeakMax_64x1, PeakMax_64x1)), vreinterpretq_s64_u64(vmovl_u32(Peak_exp_u32x2))), 32);

        // if(Peak_mant >= sqrt(0.5))
        // {
        //     Peak_exp--;
        //     Peak_mant >>= 1;
        // }
        bsl_u32x2       = vcge_u32(Peak_mant_u32x2, vdup_n_u32(0xB504F334));
        Peak_exp_u32x2  = vbsl_u32(bsl_u32x2, vsub_u32(Peak_exp_u32x2, vdup_n_u32(1)), Peak_exp_u32x2);
        Peak_mant_u32x2 = vbsl_u32(bsl_u32x2, vrshr_n_u32(Peak_mant_u32x2, 1), Peak_mant_u32x2);

        Peak_exp_32x2 = vreinterpret_s32_u32(Peak_exp_u32x2);
#ifdef SAMPLES_24_BITS
        // correction of 16 bits if input samples are 24 bits
        Peak_exp_32x2 = vsub_s32(Peak_exp_32x2, vdup_n_s32(16));
#endif // SAMPLES_24_BITS

        // at this point : sqrt(0.5)/2 <= Peak_mant < sqrt(0.5)
        //
        // ln(1+x) = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 - x^10/10 ...    accuracy OK if |x| < 0.5
        // sqrt(0.5)/2 <= Peak_mant < sqrt(0.5)  =>  sqrt(0.5)-1 <= 2*Peak_mant-1 < 2*sqrt(0.5)-1
        //                                       =>  ln(Peak_mant) = ln(1+x)-ln(2) with x=2*Peak_mant-1, i.e. |x| < 0.414214...

        // x=2*PeakMax_mant-1 in Q31
        // => sqrt(0.5)-1 <= x < 2*sqrt(0.5)-1
        x_32x2      = vreinterpret_s32_u32(vsub_u32(Peak_mant_u32x2, vdup_n_u32(0x80000000)));

        PeakdB_32x2 = x_32x2;                                                                     // PeakdB = x

        xn_32x2     = vqrdmulh_s32(x_32x2, x_32x2);                                               // xn = x^2
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 1));                            // PeakdB = x - x^2/2

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^3
        PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x2AAAAAAB)));      // PeakdB = x - x^2/2 + x^3/3

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^4
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 2));                            // PeakdB = x - x^2/2 + x^3/3 - x^4/4

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^5
        PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x1999999A)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^6
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x15555555)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^7
        PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x12492492)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^8
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 3));                            // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^9
        PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x0E38E38E)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^10
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x0CCCCCCD)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 - x^10/10

        // at this point : PeakMaxdB contains ln(1+x) in Q31

        if(RmsMeasure)
        {
            // dB(power) = 10*log10(power)

            // PeakMaxdB = 10*log10(PeakMax)+20*log10(2)*(HEADROOM+MaxShiftBits)
            //           = 10*ln(PeakMax)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 10/ln(10)*ln(PeakMax_mant*2^(-PeakMax_exp))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 10/ln(10)*(ln(PeakMax_mant)-PeakMax_exp*ln(2))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 10/ln(10)*ln(PeakMax_mant)-PeakMax_exp*10*ln(2)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 10/ln(10)*ln(PeakMax_mant)+10*ln(2)/ln(10)*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp)
            //
            // => RmsdB = 10/ln(10)*ln(1+x)+10*ln(2)/ln(10)*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp)
            // => RmsdB (Q16) = 0x457CB*ln(1+x)+0x302A3*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp)

            // fractional multiply 0x457CB*ln(1+x) in Q16
            PeakdB_32x2   = vqrdmulh_s32(PeakdB_32x2, vdup_n_s32(0x457CB));

            // PeakdB_exp = 2*(HEADROOM+MaxShiftBits)-PeakdB_exp
            Peak_exp_32x2 = vsub_s32(vdup_n_s32(2 * (HEADROOM + MaxShiftBits)), Peak_exp_32x2);

            // PeakMaxdB final value (integer mac 0x302A3*PeakdB_exp)
            PeakdB_32x2   = vmla_s32(PeakdB_32x2, Peak_exp_32x2, vdup_n_s32(0x302A3));
        }
        else
        {
            // dB(power) = 20*log10(abs)

            // PeakMaxdB = 20*log10(PeakMax)+20*log10(2)*(HEADROOM+MaxShiftBits)
            //           = 20*ln(PeakMax)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 20/ln(10)*ln(PeakMax_mant*2^(-PeakMax_exp))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 20/ln(10)*(ln(PeakMax_mant)-PeakMax_exp*ln(2))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 20/ln(10)*ln(PeakMax_mant)-PeakMax_exp*20*ln(2)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 20/ln(10)*ln(PeakMax_mant)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits-PeakMax_exp)
            //
            // => RmsdB = 20/ln(10)*ln(1+x)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits-PeakMax_exp)
            // => RmsdB (Q16) = 0x8AF96*ln(1+x)+0x60546*(HEADROOM+MaxShiftBits-PeakMax_exp)

            // fractional multiply 0x8AF96*ln(1+x) in Q16
            PeakdB_32x2     = vqrdmulh_s32(PeakdB_32x2, vdup_n_s32(0x8AF96));

            // PeakdB_exp = HEADROOM+MaxShiftBits-PeakdB_exp
            Peak_exp_32x2 = vsub_s32(vdup_n_s32(HEADROOM + MaxShiftBits), Peak_exp_32x2);

            // PeakMaxdB final value (integer mac 0x60546*PeakdB_exp)
            PeakdB_32x2     = vmla_s32(PeakdB_32x2, Peak_exp_32x2, vdup_n_s32(0x60546));
        }
        PeakdB = vget_lane_s32(PeakdB_32x2, 0);
    }
#ifdef DEBUG_LIMITER_OUTPUT
    if((debug_cpt_samples >= DEBUG_CPT_MIN) && (debug_cpt_samples <= DEBUG_CPT_MAX))
    {
        char string[100];

        debug_write_string("MRDC5B_LIMITER_PEAKMAX_PEAKDB\n");
        sprintf(string, "PeakMax=0x%012llX, HEADROOM+MaxShiftBits=%d => PeakdB=0x%06X\n",
#ifdef SAMPLES_24_BITS
                        PeakMax & 0xFFFFFFFFFFFFLL,
#else // SAMPLES_24_BITS
                        (PeakMax >> 16) & 0xFFFFFFFFFFFFLL,
#endif // SAMPLES_24_BITS
                        HEADROOM + MaxShiftBits,
                        PeakdB & 0xFFFFFF);
        debug_write_string(string);
    }
Exemplo n.º 11
0
// CHECK-LABEL: define i32 @test_vget_lane_s32(<2 x i32> %a) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK:   [[VGET_LANE:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
// CHECK:   ret i32 [[VGET_LANE]]
// Codegen test: extracts lane 1 (the high 32-bit element) of a 64-bit NEON
// vector.  NOTE(review): the CHECK lines above are FileCheck directives that
// pin the expected LLVM IR — they and the function body must stay in sync,
// so neither should be restyled independently.
int32_t test_vget_lane_s32(int32x2_t a) {
  return vget_lane_s32(a, 1);
}
Exemplo n.º 12
0
/*
 * Stereo (2-interleaved-channel) direct-form-I biquad, 32-bit data,
 * 32-bit Q30 coefficients.
 *
 * pInstance : filter instance holding the 5 Q30 coefficients
 *             {A2, A1, A0, -B2, -B1} and the 8-entry delay line
 *             {x(n-1)L, x(n-1)R, x(n-2)L, x(n-2)R,
 *              y(n-1)L, y(n-1)R, y(n-2)L, y(n-2)R}  (all Q0)
 * pDataIn   : interleaved L/R input samples, Q0
 * pDataOut  : interleaved L/R output samples, Q0 (may equal pDataIn's layout)
 * NrSamples : number of stereo sample pairs to process
 *
 * y(n) = (A2*x(n-2) + A1*x(n-1) + A0*x(n) - B2*y(n-2) - B1*y(n-1)) >> 30
 * (the B coefficients are stored negated, hence all accumulations add).
 */
void BQ_2I_D32F32C30_TRC_WRA_01 (           Biquad_Instance_t       *pInstance,
                                            LVM_INT32                    *pDataIn,
                                            LVM_INT32                    *pDataOut,
                                            LVM_INT16                    NrSamples)


    {
#if !(defined  __ARM_HAVE_NEON)
        LVM_INT32 ynL,ynR,templ,tempd;
        LVM_INT16 ii;
        PFilter_State pBiquadState = (PFilter_State) pInstance;

         for (ii = NrSamples; ii != 0; ii--)
         {


            /**************************************************************************
                            PROCESSING OF THE LEFT CHANNEL
            ***************************************************************************/
            /* ynL= ( A2 (Q30) * x(n-2)L (Q0) ) >>30 in Q0*/
            MUL32x32INTO32(pBiquadState->coefs[0],pBiquadState->pDelays[2],ynL,30)

            /* ynL+= ( A1 (Q30) * x(n-1)L (Q0) ) >> 30 in Q0*/
            MUL32x32INTO32(pBiquadState->coefs[1],pBiquadState->pDelays[0],templ,30)
            ynL+=templ;

            /* ynL+= ( A0 (Q30) * x(n)L (Q0) ) >> 30 in Q0*/
            MUL32x32INTO32(pBiquadState->coefs[2],*pDataIn,templ,30)
            ynL+=templ;

             /* ynL+= (-B2 (Q30) * y(n-2)L (Q0) ) >> 30 in Q0*/
            MUL32x32INTO32(pBiquadState->coefs[3],pBiquadState->pDelays[6],templ,30)
            ynL+=templ;

            /* ynL+= (-B1 (Q30) * y(n-1)L (Q0) ) >> 30 in Q0 */
            MUL32x32INTO32(pBiquadState->coefs[4],pBiquadState->pDelays[4],templ,30)
            ynL+=templ;

            /**************************************************************************
                            PROCESSING OF THE RIGHT CHANNEL
            ***************************************************************************/
            /* ynR= ( A2 (Q30) * x(n-2)R (Q0) ) >> 30 in Q0*/
            MUL32x32INTO32(pBiquadState->coefs[0],pBiquadState->pDelays[3],ynR,30)

            /* ynR+= ( A1 (Q30) * x(n-1)R (Q0) ) >> 30  in Q0*/
            MUL32x32INTO32(pBiquadState->coefs[1],pBiquadState->pDelays[1],templ,30)
            ynR+=templ;

            /* ynR+= ( A0 (Q30) * x(n)R (Q0) ) >> 30 in Q0*/
            tempd=*(pDataIn+1);
            MUL32x32INTO32(pBiquadState->coefs[2],tempd,templ,30)
            ynR+=templ;

            /* ynR+= (-B2 (Q30) * y(n-2)R (Q0) ) >> 30 in Q0*/
            MUL32x32INTO32(pBiquadState->coefs[3],pBiquadState->pDelays[7],templ,30)
            ynR+=templ;

            /* ynR+= (-B1 (Q30) * y(n-1)R (Q0) ) >> 30 in Q0 */
            MUL32x32INTO32(pBiquadState->coefs[4],pBiquadState->pDelays[5],templ,30)
            ynR+=templ;

            /**************************************************************************
                            UPDATING THE DELAYS
            ***************************************************************************/
            pBiquadState->pDelays[7]=pBiquadState->pDelays[5]; /* y(n-2)R=y(n-1)R*/
            pBiquadState->pDelays[6]=pBiquadState->pDelays[4]; /* y(n-2)L=y(n-1)L*/
            pBiquadState->pDelays[3]=pBiquadState->pDelays[1]; /* x(n-2)R=x(n-1)R*/
            pBiquadState->pDelays[2]=pBiquadState->pDelays[0]; /* x(n-2)L=x(n-1)L*/
            pBiquadState->pDelays[5]=(LVM_INT32)ynR; /* Update y(n-1)R in Q0*/
            pBiquadState->pDelays[4]=(LVM_INT32)ynL; /* Update y(n-1)L in Q0*/
            pBiquadState->pDelays[0]=(*pDataIn); /* Update x(n-1)L in Q0*/
            pDataIn++;
            pBiquadState->pDelays[1]=(*pDataIn); /* Update x(n-1)R in Q0*/
            pDataIn++;

            /**************************************************************************
                            WRITING THE OUTPUT
            ***************************************************************************/
            *pDataOut=(LVM_INT32)ynL; /* Write Left output in Q0*/
            pDataOut++;
            *pDataOut=(LVM_INT32)ynR; /* Write Right output in Q0*/
            pDataOut++;


        }
#else
        LVM_INT16 ii = 0;

        PFilter_State pBiquadState = (PFilter_State) pInstance;

        /* Broadcast each Q30 coefficient to both lanes so one vector op
           processes the L and R channels together. */
        int32x2_t A2 = vdup_n_s32(pBiquadState->coefs[0]);
        int32x2_t A1 = vdup_n_s32(pBiquadState->coefs[1]);
        int32x2_t A0 = vdup_n_s32(pBiquadState->coefs[2]);
        int32x2_t B2 = vdup_n_s32(pBiquadState->coefs[3]);   /* stored as -B2 */
        int32x2_t B1 = vdup_n_s32(pBiquadState->coefs[4]);   /* stored as -B1 */

        /* Delay line, {L, R} per vector. */
        int32x2_t X_2 = vld1_s32(&pBiquadState->pDelays[2]);
        int32x2_t X_1 = vld1_s32(&pBiquadState->pDelays[0]);
        int32x2_t Y_2 = vld1_s32(&pBiquadState->pDelays[6]);
        int32x2_t Y_1 = vld1_s32(&pBiquadState->pDelays[4]);

        for (ii = 0; ii < NrSamples; ii++)
        {
            int32x2_t s = vld1_s32(pDataIn);           /* x(n) = {L, R} */
            /* Accumulate all five Q30*Q0 products in 64 bits, shift once. */
            int64x2_t r = vmull_s32(A2, X_2);
            r = vmlal_s32(r, A1, X_1);
            r = vmlal_s32(r, A0, s);
            r = vmlal_s32(r, B2, Y_2);                 /* adds -B2*y(n-2) */
            r = vmlal_s32(r, B1, Y_1);                 /* adds -B1*y(n-1) */
            int32_t ll = (int32_t)(vgetq_lane_s64(r, 0) >> 30);
            int32_t rr = (int32_t)(vgetq_lane_s64(r, 1) >> 30);
            pDataIn += 2;
            *pDataOut++ = ll;
            *pDataOut++ = rr;

            /* Shift the delay line.  BUG FIX: NEON intrinsics are pure —
               vset_lane_s32() RETURNS the updated vector and does not modify
               its argument in place.  The previous code discarded the return
               values, so the filter state was never advanced.  The lane-wise
               copies also collapse to whole-vector assignments. */
            X_2 = X_1;                                 /* x(n-2) = x(n-1)   */
            Y_2 = Y_1;                                 /* y(n-2) = y(n-1)   */
            Y_1 = vset_lane_s32(ll, Y_1, 0);           /* y(n-1)L = y(n)L   */
            Y_1 = vset_lane_s32(rr, Y_1, 1);           /* y(n-1)R = y(n)R   */
            X_1 = s;                                   /* x(n-1) = x(n)     */
        }
        /* Save the delay line back into the instance for the next call. */
        vst1_s32(&pBiquadState->pDelays[2], X_2);
        vst1_s32(&pBiquadState->pDelays[0], X_1);
        vst1_s32(&pBiquadState->pDelays[6], Y_2);
        vst1_s32(&pBiquadState->pDelays[4], Y_1);
#endif

    }