Example #1
static void SubbandCoherenceNEON(AecCore* aec,
                                 float efw[2][PART_LEN1],
                                 float dfw[2][PART_LEN1],
                                 float xfw[2][PART_LEN1],
                                 float* fft,
                                 float* cohde,
                                 float* cohxd,
                                 int* extreme_filter_divergence) {
  int i;

  SmoothedPSD(aec, efw, dfw, xfw, extreme_filter_divergence);

  {
    const float32x4_t vec_1eminus10 = vdupq_n_f32(1e-10f);

    // Subband coherence
    for (i = 0; i + 3 < PART_LEN1; i += 4) {
      const float32x4_t vec_sd = vld1q_f32(&aec->sd[i]);
      const float32x4_t vec_se = vld1q_f32(&aec->se[i]);
      const float32x4_t vec_sx = vld1q_f32(&aec->sx[i]);
      const float32x4_t vec_sdse = vmlaq_f32(vec_1eminus10, vec_sd, vec_se);
      const float32x4_t vec_sdsx = vmlaq_f32(vec_1eminus10, vec_sd, vec_sx);
      float32x4x2_t vec_sde = vld2q_f32(&aec->sde[i][0]);
      float32x4x2_t vec_sxd = vld2q_f32(&aec->sxd[i][0]);
      float32x4_t vec_cohde = vmulq_f32(vec_sde.val[0], vec_sde.val[0]);
      float32x4_t vec_cohxd = vmulq_f32(vec_sxd.val[0], vec_sxd.val[0]);
      vec_cohde = vmlaq_f32(vec_cohde, vec_sde.val[1], vec_sde.val[1]);
      vec_cohde = vdivq_f32(vec_cohde, vec_sdse);
      vec_cohxd = vmlaq_f32(vec_cohxd, vec_sxd.val[1], vec_sxd.val[1]);
      vec_cohxd = vdivq_f32(vec_cohxd, vec_sdsx);

      vst1q_f32(&cohde[i], vec_cohde);
      vst1q_f32(&cohxd[i], vec_cohxd);
    }
  }
  // Scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    cohde[i] =
        (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
        (aec->sd[i] * aec->se[i] + 1e-10f);
    cohxd[i] =
        (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
        (aec->sx[i] * aec->sd[i] + 1e-10f);
  }
}
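
A portability note on the block above: vdivq_f32 is only defined for AArch64, so the two divisions would not compile for 32-bit ARM. A minimal fallback sketch for ARMv7 (vdivq_f32_compat is a hypothetical name), using NEON's reciprocal estimate refined by two Newton-Raphson steps:

static inline float32x4_t vdivq_f32_compat(float32x4_t num, float32x4_t den) {
  float32x4_t recip = vrecpeq_f32(den);               // Rough estimate of 1/den.
  recip = vmulq_f32(vrecpsq_f32(den, recip), recip);  // Newton-Raphson step 1.
  recip = vmulq_f32(vrecpsq_f32(den, recip), recip);  // Newton-Raphson step 2.
  return vmulq_f32(num, recip);                       // num * (1/den).
}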
Example #2
// Puts FFT output data into a complex-valued array.
static void StoreAsComplexNEON(const float* data,
                               float data_complex[2][PART_LEN1]) {
  int i;
  for (i = 0; i < PART_LEN; i += 4) {
    const float32x4x2_t vec_data = vld2q_f32(&data[2 * i]);
    vst1q_f32(&data_complex[0][i], vec_data.val[0]);
    vst1q_f32(&data_complex[1][i], vec_data.val[1]);
  }
  // fix beginning/end values
  data_complex[1][0] = 0;
  data_complex[1][PART_LEN] = 0;
  data_complex[0][0] = data[0];
  data_complex[0][PART_LEN] = data[1];
}
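
For reference, the same packing written out in scalar form; a sketch assuming the Ooura rdft layout used above, in which data[0] and data[1] carry the DC and Nyquist real parts (StoreAsComplexScalar is a hypothetical name):

static void StoreAsComplexScalar(const float* data,
                                 float data_complex[2][PART_LEN1]) {
  int i;
  data_complex[0][0] = data[0];  // DC bin: purely real.
  data_complex[1][0] = 0;
  for (i = 1; i < PART_LEN; i++) {
    data_complex[0][i] = data[2 * i];
    data_complex[1][i] = data[2 * i + 1];
  }
  data_complex[0][PART_LEN] = data[1];  // Nyquist bin: packed into data[1].
  data_complex[1][PART_LEN] = 0;
}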
Example #3
#include <arm_neon.h>

// Compile-time check of the vld2q_f32 signature: the intrinsic takes a
// const float32_t* and returns a float32x4x2_t. The test is evidently meant
// to compile, not to execute, so the null pointer is never dereferenced.
void test_vld2Qf32 (void)
{
  float32x4x2_t out_float32x4x2_t;

  out_float32x4x2_t = vld2q_f32 (0);
}
Example #4
static void rftbsub_128_neon(float* a) {
  const float* c = rdft_w + 32;
  int j1, j2;
  const float32x4_t mm_half = vdupq_n_f32(0.5f);

  a[1] = -a[1];
  // Vectorized code (four at once).
  //    Note: commented numbers are indexes for the first iteration of the loop.
  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
    // Load 'wk'.
    const float32x4_t c_j1 = vld1q_f32(&c[j1]);         //  1,  2,  3,  4,
    const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]);    // 28, 29, 30, 31,
    const float32x4_t wkrt = vsubq_f32(mm_half, c_k1);  // 28, 29, 30, 31,
    const float32x4_t wkr_ = reverse_order_f32x4(wkrt); // 31, 30, 29, 28,
    const float32x4_t wki_ = c_j1;                      //  1,  2,  3,  4,
    // Load and shuffle 'a'.
    //   2,   4,   6,   8,   3,   5,   7,   9
    float32x4x2_t a_j2_p = vld2q_f32(&a[0 + j2]);
    // 120, 122, 124, 126, 121, 123, 125, 127,
    const float32x4x2_t k2_0_4 = vld2q_f32(&a[122 - j2]);
    // 126, 124, 122, 120
    const float32x4_t a_k2_p0 = reverse_order_f32x4(k2_0_4.val[0]);
    // 127, 125, 123, 121
    const float32x4_t a_k2_p1 = reverse_order_f32x4(k2_0_4.val[1]);
    // Calculate 'x'.
    const float32x4_t xr_ = vsubq_f32(a_j2_p.val[0], a_k2_p0);  // 2-126, 4-124, 6-122, 8-120,
    const float32x4_t xi_ = vaddq_f32(a_j2_p.val[1], a_k2_p1);  // 3-127, 5-125, 7-123, 9-121,
    // Calculate product into 'y'.
    //    yr = wkr * xr + wki * xi;
    //    yi = wkr * xi - wki * xr;
    const float32x4_t a_ = vmulq_f32(wkr_, xr_);
    const float32x4_t b_ = vmulq_f32(wki_, xi_);
    const float32x4_t c_ = vmulq_f32(wkr_, xi_);
    const float32x4_t d_ = vmulq_f32(wki_, xr_);
    const float32x4_t yr_ = vaddq_f32(a_, b_);  // 2-126, 4-124, 6-122, 8-120,
    const float32x4_t yi_ = vsubq_f32(c_, d_);  // 3-127, 5-125, 7-123, 9-121,
                                                // Update 'a'.
                                                //    a[j2 + 0] -= yr;
                                                //    a[j2 + 1] = yi - a[j2 + 1];
                                                //    a[k2 + 0] += yr;
                                                //    a[k2 + 1] = yi - a[k2 + 1];
    // 126, 124, 122, 120,
    const float32x4_t a_k2_p0n = vaddq_f32(a_k2_p0, yr_);
    // 127, 125, 123, 121,
    const float32x4_t a_k2_p1n = vsubq_f32(yi_, a_k2_p1);
    // Shuffle in right order and store.
    //   2,   3,   4,   5,   6,   7,   8,   9,
    const float32x4_t a_k2_p0nr = vrev64q_f32(a_k2_p0n);
    const float32x4_t a_k2_p1nr = vrev64q_f32(a_k2_p1n);
    // 124, 125, 126, 127, 120, 121, 122, 123
    const float32x4x2_t a_k2_n = vzipq_f32(a_k2_p0nr, a_k2_p1nr);
    //   2,   4,   6,   8,
    a_j2_p.val[0] = vsubq_f32(a_j2_p.val[0], yr_);
    //   3,   5,   7,   9,
    a_j2_p.val[1] = vsubq_f32(yi_, a_j2_p.val[1]);
    //   2,   3,   4,   5,   6,   7,   8,   9,
    vst2q_f32(&a[0 + j2], a_j2_p);

    vst1q_f32(&a[122 - j2], a_k2_n.val[1]);
    vst1q_f32(&a[126 - j2], a_k2_n.val[0]);
  }

  // Scalar code for the remaining items.
  for (; j2 < 64; j1 += 1, j2 += 2) {
    const int k2 = 128 - j2;
    const int k1 = 32 - j1;
    const float wkr = 0.5f - c[k1];
    const float wki = c[j1];
    const float xr = a[j2 + 0] - a[k2 + 0];
    const float xi = a[j2 + 1] + a[k2 + 1];
    const float yr = wkr * xr + wki * xi;
    const float yi = wkr * xi - wki * xr;
    a[j2 + 0] = a[j2 + 0] - yr;
    a[j2 + 1] = yi - a[j2 + 1];
    a[k2 + 0] = yr + a[k2 + 0];
    a[k2 + 1] = yi - a[k2 + 1];
  }
  a[65] = -a[65];
}
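
rftbsub_128_neon uses a reverse_order_f32x4 helper that is not shown above. A minimal sketch of the natural definition (swap the two 64-bit halves, then reverse within each half):

static inline float32x4_t reverse_order_f32x4(float32x4_t in) {
  // A B C D -> C D A B
  const float32x4_t rev = vcombine_f32(vget_high_f32(in), vget_low_f32(in));
  // C D A B -> D C B A
  return vrev64q_f32(rev);
}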
Example #5
void fft_real_neon(
        CkFftContext* context, 
        const float* input, 
        CkFftComplex* output, 
        int count)
{
    int countDiv2 = count/2;

    fft_neon(context, (const CkFftComplex*) input, output, countDiv2, false, 1, context->fwdExpTable, context->maxCount / countDiv2);

    output[countDiv2] = output[0];

    int expTableStride = context->maxCount/count;
    const CkFftComplex* exp0 = context->fwdExpTable;
    const CkFftComplex* exp1 = context->fwdExpTable + countDiv2 * expTableStride;

    CkFftComplex* p0 = output;
    CkFftComplex* p1 = output + countDiv2 - 3;
    const CkFftComplex* pEnd = p0 + count/4;
    while (p0 < pEnd)
    {
        float32x4x2_t z0_v = vld2q_f32((const float32_t*) p0);
        float32x4x2_t z1_v = vld2q_f32((const float32_t*) p1);

        float32x2_t hi, lo;

        // reverse z1 real
        z1_v.val[0] = vrev64q_f32(z1_v.val[0]);
        hi = vget_high_f32(z1_v.val[0]);
        lo = vget_low_f32(z1_v.val[0]);
        z1_v.val[0] = vcombine_f32(hi, lo);

        // reverse z1 imaginary
        z1_v.val[1] = vrev64q_f32(z1_v.val[1]);
        hi = vget_high_f32(z1_v.val[1]);
        lo = vget_low_f32(z1_v.val[1]);
        z1_v.val[1] = vcombine_f32(hi, lo);

        float32x4x2_t sum_v;
        sum_v.val[0] = vaddq_f32(z0_v.val[0], z1_v.val[0]);
        sum_v.val[1] = vsubq_f32(z0_v.val[1], z1_v.val[1]);

        float32x4x2_t diff_v;
        diff_v.val[0] = vsubq_f32(z0_v.val[0], z1_v.val[0]);
        diff_v.val[1] = vaddq_f32(z0_v.val[1], z1_v.val[1]);

        float32x4x2_t exp_v;
        exp_v = vld2q_lane_f32((const float32_t*) exp0, exp_v, 0);
        exp0 += expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp0, exp_v, 1);
        exp0 += expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp0, exp_v, 2);
        exp0 += expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp0, exp_v, 3);
        exp0 += expTableStride;

        float32x4x2_t f_v;
        f_v.val[0] = vnegq_f32(exp_v.val[1]);
        f_v.val[1] = exp_v.val[0];

        float32x4x2_t c_v;
        multiply(f_v, diff_v, c_v);
        subtract(sum_v, c_v, z0_v);
        vst2q_f32((float32_t*) p0, z0_v);

        diff_v.val[0] = vnegq_f32(diff_v.val[0]);
        sum_v.val[1] = vnegq_f32(sum_v.val[1]);

        exp_v = vld2q_lane_f32((const float32_t*) exp1, exp_v, 0);
        exp1 -= expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp1, exp_v, 1);
        exp1 -= expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp1, exp_v, 2);
        exp1 -= expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp1, exp_v, 3);
        exp1 -= expTableStride;

        f_v.val[0] = vnegq_f32(exp_v.val[1]);
        f_v.val[1] = exp_v.val[0];

        multiply(f_v, diff_v, c_v);
        subtract(sum_v, c_v, z1_v);

        // reverse z1 real
        z1_v.val[0] = vrev64q_f32(z1_v.val[0]);
        hi = vget_high_f32(z1_v.val[0]);
        lo = vget_low_f32(z1_v.val[0]);
        z1_v.val[0] = vcombine_f32(hi, lo);

        // reverse z1 imaginary
        z1_v.val[1] = vrev64q_f32(z1_v.val[1]);
        hi = vget_high_f32(z1_v.val[1]);
        lo = vget_low_f32(z1_v.val[1]);
        z1_v.val[1] = vcombine_f32(hi, lo);

        vst2q_f32((float32_t*) p1, z1_v);

        p0 += 4;
        p1 -= 4;
    }

    if (count > 8)
    {
        // middle:
        p0->real = p0->real * 2.0f;
        p0->imag = -p0->imag * 2.0f;
    }
}
Example #6
void fft_real_inverse_neon(
        CkFftContext* context, 
        const CkFftComplex* input, 
        float* output, 
        int count,
        CkFftComplex* tmpBuf)
{
    int countDiv2 = count/2;

    int expTableStride = context->maxCount/count;
    const CkFftComplex* exp0 = context->invExpTable;
    const CkFftComplex* exp1 = context->invExpTable + countDiv2 * expTableStride;

    const CkFftComplex* p0 = input;
    const CkFftComplex* p1 = input + countDiv2 - 3;
    CkFftComplex* tmp0 = tmpBuf;
    CkFftComplex* tmp1 = tmpBuf + countDiv2 - 3;
    const CkFftComplex* pEnd = p0 + count/4;
    while (p0 < pEnd)
    {
        float32x4x2_t z0_v = vld2q_f32((const float32_t*) p0);
        float32x4x2_t z1_v = vld2q_f32((const float32_t*) p1);

        float32x2_t hi, lo;

        // reverse z1 real
        z1_v.val[0] = vrev64q_f32(z1_v.val[0]);
        hi = vget_high_f32(z1_v.val[0]);
        lo = vget_low_f32(z1_v.val[0]);
        z1_v.val[0] = vcombine_f32(hi, lo);

        // reverse z1 imaginary
        z1_v.val[1] = vrev64q_f32(z1_v.val[1]);
        hi = vget_high_f32(z1_v.val[1]);
        lo = vget_low_f32(z1_v.val[1]);
        z1_v.val[1] = vcombine_f32(hi, lo);

        float32x4x2_t sum_v;
        sum_v.val[0] = vaddq_f32(z0_v.val[0], z1_v.val[0]);
        sum_v.val[1] = vsubq_f32(z0_v.val[1], z1_v.val[1]);

        float32x4x2_t diff_v;
        diff_v.val[0] = vsubq_f32(z0_v.val[0], z1_v.val[0]);
        diff_v.val[1] = vaddq_f32(z0_v.val[1], z1_v.val[1]);

        float32x4x2_t exp_v;
        exp_v = vld2q_lane_f32((const float32_t*) exp0, exp_v, 0);
        exp0 += expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp0, exp_v, 1);
        exp0 += expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp0, exp_v, 2);
        exp0 += expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp0, exp_v, 3);
        exp0 += expTableStride;

        float32x4x2_t f_v;
        f_v.val[0] = vnegq_f32(exp_v.val[1]);
        f_v.val[1] = exp_v.val[0];

        float32x4x2_t c_v;
        multiply(f_v, diff_v, c_v);
        add(sum_v, c_v, z0_v);
        vst2q_f32((float32_t*) tmp0, z0_v);

        diff_v.val[0] = vnegq_f32(diff_v.val[0]);
        sum_v.val[1] = vnegq_f32(sum_v.val[1]);

        exp_v = vld2q_lane_f32((const float32_t*) exp1, exp_v, 0);
        exp1 -= expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp1, exp_v, 1);
        exp1 -= expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp1, exp_v, 2);
        exp1 -= expTableStride;
        exp_v = vld2q_lane_f32((const float32_t*) exp1, exp_v, 3);
        exp1 -= expTableStride;

        f_v.val[0] = vnegq_f32(exp_v.val[1]);
        f_v.val[1] = exp_v.val[0];

        multiply(f_v, diff_v, c_v);
        add(sum_v, c_v, z1_v);

        // reverse z1 real
        z1_v.val[0] = vrev64q_f32(z1_v.val[0]);
        hi = vget_high_f32(z1_v.val[0]);
        lo = vget_low_f32(z1_v.val[0]);
        z1_v.val[0] = vcombine_f32(hi, lo);

        // reverse z1 imaginary
        z1_v.val[1] = vrev64q_f32(z1_v.val[1]);
        hi = vget_high_f32(z1_v.val[1]);
        lo = vget_low_f32(z1_v.val[1]);
        z1_v.val[1] = vcombine_f32(hi, lo);

        vst2q_f32((float32_t*) tmp1, z1_v);

        p0 += 4;
        tmp0 += 4;
        p1 -= 4;
        tmp1 -= 4;
    }

    // middle:
    tmp0->real = p0->real * 2.0f;
    tmp0->imag = -p0->imag * 2.0f;

    fft_neon(context, tmpBuf, (CkFftComplex*) output, countDiv2, true, 1, context->invExpTable, context->maxCount / countDiv2);
}
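
Both fft_real_neon and fft_real_inverse_neon call multiply(), add(), and subtract() helpers that are not shown. Plausible definitions, assuming they act elementwise on four complex values held as split real/imaginary vectors (a sketch, not the library's confirmed source):

static inline void multiply(const float32x4x2_t& a, const float32x4x2_t& b,
                            float32x4x2_t& out) {
  // (ar + i*ai)(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br)
  out.val[0] = vmlsq_f32(vmulq_f32(a.val[0], b.val[0]), a.val[1], b.val[1]);
  out.val[1] = vmlaq_f32(vmulq_f32(a.val[0], b.val[1]), a.val[1], b.val[0]);
}

static inline void add(const float32x4x2_t& a, const float32x4x2_t& b,
                       float32x4x2_t& out) {
  out.val[0] = vaddq_f32(a.val[0], b.val[0]);
  out.val[1] = vaddq_f32(a.val[1], b.val[1]);
}

static inline void subtract(const float32x4x2_t& a, const float32x4x2_t& b,
                            float32x4x2_t& out) {
  out.val[0] = vsubq_f32(a.val[0], b.val[0]);
  out.val[1] = vsubq_f32(a.val[1], b.val[1]);
}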
Example #7
inline float32x4x2_t vld2q(const f32 * ptr) { return vld2q_f32(ptr); }
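
Such a thin overload lets element-type-generic (e.g. templated) code spell vld2q uniformly. Companion overloads would follow the same pattern; the ones below are assumed, since only the f32 case appears above, and the u8/s16/s32 element typedefs come from the surrounding library:

inline uint8x16x2_t vld2q(const u8 * ptr) { return vld2q_u8(ptr); }
inline int16x8x2_t vld2q(const s16 * ptr) { return vld2q_s16(ptr); }
inline int32x4x2_t vld2q(const s32 * ptr) { return vld2q_s32(ptr); }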
Example #8
// Updates the following smoothed Power Spectral Densities (PSD):
//  - sd  : near-end
//  - se  : residual echo
//  - sx  : far-end
//  - sde : cross-PSD of near-end and residual echo
//  - sxd : cross-PSD of near-end and far-end
//
// In addition to updating the PSDs, the filter divergence state is
// determined, upon which actions are taken.
static void SmoothedPSD(AecCore* aec,
                        float efw[2][PART_LEN1],
                        float dfw[2][PART_LEN1],
                        float xfw[2][PART_LEN1],
                        int* extreme_filter_divergence) {
  // Power estimate smoothing coefficients.
  const float* ptrGCoh = aec->extended_filter_enabled
      ? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
      : WebRtcAec_kNormalSmoothingCoefficients[aec->mult - 1];
  int i;
  float sdSum = 0, seSum = 0;
  const float32x4_t vec_15 = vdupq_n_f32(WebRtcAec_kMinFarendPSD);
  float32x4_t vec_sdSum = vdupq_n_f32(0.0f);
  float32x4_t vec_seSum = vdupq_n_f32(0.0f);

  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    const float32x4_t vec_dfw0 = vld1q_f32(&dfw[0][i]);
    const float32x4_t vec_dfw1 = vld1q_f32(&dfw[1][i]);
    const float32x4_t vec_efw0 = vld1q_f32(&efw[0][i]);
    const float32x4_t vec_efw1 = vld1q_f32(&efw[1][i]);
    const float32x4_t vec_xfw0 = vld1q_f32(&xfw[0][i]);
    const float32x4_t vec_xfw1 = vld1q_f32(&xfw[1][i]);
    float32x4_t vec_sd = vmulq_n_f32(vld1q_f32(&aec->sd[i]), ptrGCoh[0]);
    float32x4_t vec_se = vmulq_n_f32(vld1q_f32(&aec->se[i]), ptrGCoh[0]);
    float32x4_t vec_sx = vmulq_n_f32(vld1q_f32(&aec->sx[i]), ptrGCoh[0]);
    float32x4_t vec_dfw_sumsq = vmulq_f32(vec_dfw0, vec_dfw0);
    float32x4_t vec_efw_sumsq = vmulq_f32(vec_efw0, vec_efw0);
    float32x4_t vec_xfw_sumsq = vmulq_f32(vec_xfw0, vec_xfw0);

    vec_dfw_sumsq = vmlaq_f32(vec_dfw_sumsq, vec_dfw1, vec_dfw1);
    vec_efw_sumsq = vmlaq_f32(vec_efw_sumsq, vec_efw1, vec_efw1);
    vec_xfw_sumsq = vmlaq_f32(vec_xfw_sumsq, vec_xfw1, vec_xfw1);
    vec_xfw_sumsq = vmaxq_f32(vec_xfw_sumsq, vec_15);
    vec_sd = vmlaq_n_f32(vec_sd, vec_dfw_sumsq, ptrGCoh[1]);
    vec_se = vmlaq_n_f32(vec_se, vec_efw_sumsq, ptrGCoh[1]);
    vec_sx = vmlaq_n_f32(vec_sx, vec_xfw_sumsq, ptrGCoh[1]);

    vst1q_f32(&aec->sd[i], vec_sd);
    vst1q_f32(&aec->se[i], vec_se);
    vst1q_f32(&aec->sx[i], vec_sx);

    {
      float32x4x2_t vec_sde = vld2q_f32(&aec->sde[i][0]);
      float32x4_t vec_dfwefw0011 = vmulq_f32(vec_dfw0, vec_efw0);
      float32x4_t vec_dfwefw0110 = vmulq_f32(vec_dfw0, vec_efw1);
      vec_sde.val[0] = vmulq_n_f32(vec_sde.val[0], ptrGCoh[0]);
      vec_sde.val[1] = vmulq_n_f32(vec_sde.val[1], ptrGCoh[0]);
      vec_dfwefw0011 = vmlaq_f32(vec_dfwefw0011, vec_dfw1, vec_efw1);
      vec_dfwefw0110 = vmlsq_f32(vec_dfwefw0110, vec_dfw1, vec_efw0);
      vec_sde.val[0] = vmlaq_n_f32(vec_sde.val[0], vec_dfwefw0011, ptrGCoh[1]);
      vec_sde.val[1] = vmlaq_n_f32(vec_sde.val[1], vec_dfwefw0110, ptrGCoh[1]);
      vst2q_f32(&aec->sde[i][0], vec_sde);
    }

    {
      float32x4x2_t vec_sxd = vld2q_f32(&aec->sxd[i][0]);
      float32x4_t vec_dfwxfw0011 = vmulq_f32(vec_dfw0, vec_xfw0);
      float32x4_t vec_dfwxfw0110 = vmulq_f32(vec_dfw0, vec_xfw1);
      vec_sxd.val[0] = vmulq_n_f32(vec_sxd.val[0], ptrGCoh[0]);
      vec_sxd.val[1] = vmulq_n_f32(vec_sxd.val[1], ptrGCoh[0]);
      vec_dfwxfw0011 = vmlaq_f32(vec_dfwxfw0011, vec_dfw1, vec_xfw1);
      vec_dfwxfw0110 = vmlsq_f32(vec_dfwxfw0110, vec_dfw1, vec_xfw0);
      vec_sxd.val[0] = vmlaq_n_f32(vec_sxd.val[0], vec_dfwxfw0011, ptrGCoh[1]);
      vec_sxd.val[1] = vmlaq_n_f32(vec_sxd.val[1], vec_dfwxfw0110, ptrGCoh[1]);
      vst2q_f32(&aec->sxd[i][0], vec_sxd);
    }

    vec_sdSum = vaddq_f32(vec_sdSum, vec_sd);
    vec_seSum = vaddq_f32(vec_seSum, vec_se);
  }
  {
    float32x2_t vec_sdSum_total;
    float32x2_t vec_seSum_total;
    // A B C D
    vec_sdSum_total = vpadd_f32(vget_low_f32(vec_sdSum),
                                vget_high_f32(vec_sdSum));
    vec_seSum_total = vpadd_f32(vget_low_f32(vec_seSum),
                                vget_high_f32(vec_seSum));
    // A+B C+D
    vec_sdSum_total = vpadd_f32(vec_sdSum_total, vec_sdSum_total);
    vec_seSum_total = vpadd_f32(vec_seSum_total, vec_seSum_total);
    // A+B+C+D A+B+C+D
    sdSum = vget_lane_f32(vec_sdSum_total, 0);
    seSum = vget_lane_f32(vec_seSum_total, 0);
  }

  // Scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    aec->sd[i] = ptrGCoh[0] * aec->sd[i] +
                 ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
    aec->se[i] = ptrGCoh[0] * aec->se[i] +
                 ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
    // We threshold here to protect against the ill-effects of a zero farend.
    // The threshold is not arbitrarily chosen, but balances protection and
    // adverse interaction with the algorithm's tuning.
    // TODO(bjornv): investigate further why this is so sensitive.
    aec->sx[i] =
        ptrGCoh[0] * aec->sx[i] +
        ptrGCoh[1] * WEBRTC_SPL_MAX(
            xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
            WebRtcAec_kMinFarendPSD);

    aec->sde[i][0] =
        ptrGCoh[0] * aec->sde[i][0] +
        ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
    aec->sde[i][1] =
        ptrGCoh[0] * aec->sde[i][1] +
        ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);

    aec->sxd[i][0] =
        ptrGCoh[0] * aec->sxd[i][0] +
        ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
    aec->sxd[i][1] =
        ptrGCoh[0] * aec->sxd[i][1] +
        ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);

    sdSum += aec->sd[i];
    seSum += aec->se[i];
  }

  // Divergent filter safeguard update.
  aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;

  // Signal extreme filter divergence if the error is significantly larger
  // than the nearend (13 dB).
  *extreme_filter_divergence = (seSum > (19.95f * sdSum));
}
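
A side note on the horizontal reduction block above (the two vpadd_f32 stages): on AArch64 it collapses to a single across-vector add. A sketch, assuming an AArch64-only build:

#if defined(__aarch64__)
  sdSum = vaddvq_f32(vec_sdSum);  // Adds all four lanes in one instruction.
  seSum = vaddvq_f32(vec_seSum);
#endif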
Example #9
static void ne10_fft_split_c2r_1d_float32_neon (ne10_fft_cpx_float32_t *dst,
        const ne10_fft_cpx_float32_t *src,
        ne10_fft_cpx_float32_t *twiddles,
        ne10_int32_t ncfft)
{

    ne10_int32_t k;
    ne10_int32_t count = ncfft / 2;
    ne10_fft_cpx_float32_t fk, fnkc, fek, fok, tmp;
    float32x4x2_t q2_fk, q2_fnkc, q2_tw, q2_dst, q2_dst2;
    float32x4_t q_fnkc_r, q_fnkc_i;
    float32x4_t q_fek_r, q_fek_i, q_fok_r, q_fok_i;
    float32x4_t q_tmp0, q_tmp1, q_tmp2, q_tmp3, q_val;
    float32x4_t q_dst2_r, q_dst2_i;
    float32_t *p_src, *p_src2, *p_dst, *p_dst2, *p_twiddles;

    dst[0].r = (src[0].r + src[ncfft].r) * 0.5f;
    dst[0].i = (src[0].r - src[ncfft].r) * 0.5f;

    if (count >= 4)
    {
        for (k = 1; k <= count ; k += 4)
        {
            p_src  = (float32_t*) (& (src[k]));
            p_src2  = (float32_t*) (& (src[ncfft - k - 3]));
            p_twiddles  = (float32_t*) (& (twiddles[k - 1]));
            p_dst  = (float32_t*) (& (dst[k]));
            p_dst2  = (float32_t*) (& (dst[ncfft - k - 3]));

            q2_fk  = vld2q_f32 (p_src);
            q2_fnkc = vld2q_f32 (p_src2);
            q2_tw = vld2q_f32 (p_twiddles);
            q2_fnkc.val[0] = vrev64q_f32 (q2_fnkc.val[0]);
            q2_fnkc.val[1] = vrev64q_f32 (q2_fnkc.val[1]);
            q_fnkc_r = vcombine_f32 (vget_high_f32 (q2_fnkc.val[0]), vget_low_f32 (q2_fnkc.val[0]));
            q_fnkc_i = vcombine_f32 (vget_high_f32 (q2_fnkc.val[1]), vget_low_f32 (q2_fnkc.val[1]));
            q_fnkc_i = vnegq_f32 (q_fnkc_i);

            q_fek_r = vaddq_f32 (q2_fk.val[0], q_fnkc_r);
            q_fek_i = vaddq_f32 (q2_fk.val[1], q_fnkc_i);

            q_tmp0 = vsubq_f32 (q2_fk.val[0], q_fnkc_r);
            q_tmp1 = vsubq_f32 (q2_fk.val[1], q_fnkc_i);

            q_fok_r = vmulq_f32 (q_tmp0, q2_tw.val[0]);
            q_fok_i = vmulq_f32 (q_tmp1, q2_tw.val[0]);
            q_tmp2 = vmulq_f32 (q_tmp1, q2_tw.val[1]);
            q_tmp3 = vmulq_f32 (q_tmp0, q2_tw.val[1]);
            q_fok_r = vaddq_f32 (q_fok_r, q_tmp2);
            q_fok_i = vsubq_f32 (q_fok_i, q_tmp3);

            q_val = vdupq_n_f32 (0.5f);
            q_dst2_r = vsubq_f32 (q_fek_r, q_fok_r);
            q_dst2_i = vsubq_f32 (q_fok_i, q_fek_i);
            q2_dst.val[0] = vaddq_f32 (q_fek_r, q_fok_r);
            q2_dst.val[1] = vaddq_f32 (q_fek_i, q_fok_i);
            q_dst2_r = vmulq_f32 (q_dst2_r, q_val);
            q_dst2_i = vmulq_f32 (q_dst2_i, q_val);
            q2_dst.val[0] = vmulq_f32 (q2_dst.val[0], q_val);
            q2_dst.val[1] = vmulq_f32 (q2_dst.val[1], q_val);
            q_dst2_r = vrev64q_f32 (q_dst2_r);
            q_dst2_i = vrev64q_f32 (q_dst2_i);
            q2_dst2.val[0] = vcombine_f32 (vget_high_f32 (q_dst2_r), vget_low_f32 (q_dst2_r));
            q2_dst2.val[1] = vcombine_f32 (vget_high_f32 (q_dst2_i), vget_low_f32 (q_dst2_i));
            vst2q_f32 (p_dst, q2_dst);
            vst2q_f32 (p_dst2, q2_dst2);

        }
    }
    else
    {
        for (k = 1; k <= count ; k++)
        {
            fk = src[k];
            fnkc.r = src[ncfft - k].r;
            fnkc.i = -src[ncfft - k].i;

            fek.r = fk.r + fnkc.r;
            fek.i = fk.i + fnkc.i;

            tmp.r = fk.r - fnkc.r;
            tmp.i = fk.i - fnkc.i;

            fok.r = tmp.r * twiddles[k - 1].r + tmp.i * twiddles[k - 1].i;
            fok.i = tmp.i * twiddles[k - 1].r - tmp.r * twiddles[k - 1].i;

            dst[k].r = (fek.r + fok.r) * 0.5f;
            dst[k].i = (fek.i + fok.i) * 0.5f;

            dst[ncfft - k].r = (fek.r - fok.r) * 0.5f;
            dst[ncfft - k].i = (fok.i - fek.i) * 0.5f;
        }
    }
}
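
The scalar fallback makes the recombination explicit. In LaTeX terms, with $N$ = ncfft, $F_k$ the input half-spectrum, $W_k$ = twiddles[k - 1], and $\overline{z}$ denoting conjugation, both branches compute

$$f^e_k = F_k + \overline{F_{N-k}}, \qquad f^o_k = \bigl(F_k - \overline{F_{N-k}}\bigr)\,\overline{W_k},$$
$$\mathrm{dst}_k = \tfrac{1}{2}\bigl(f^e_k + f^o_k\bigr), \qquad \mathrm{dst}_{N-k} = \tfrac{1}{2}\,\overline{f^e_k - f^o_k},$$

i.e. the even part of the packed spectrum plus the twiddle-rotated odd part, halved; the vectorized branch does the same thing four bins at a time.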
Example #10
static void ne10_fft_split_r2c_1d_float32_neon (ne10_fft_cpx_float32_t *dst,
        const ne10_fft_cpx_float32_t *src,
        ne10_fft_cpx_float32_t *twiddles,
        ne10_int32_t ncfft)
{
    ne10_int32_t k;
    ne10_int32_t count = ncfft / 2;
    ne10_fft_cpx_float32_t fpnk, fpk, f1k, f2k, tw, tdc;
    float32x4x2_t q2_fpk, q2_fpnk, q2_tw, q2_dst, q2_dst2;
    float32x4_t q_fpnk_r, q_fpnk_i;
    float32x4_t q_f1k_r, q_f1k_i, q_f2k_r, q_f2k_i;
    float32x4_t q_tw_r, q_tw_i;
    float32x4_t q_tmp0, q_tmp1, q_tmp2, q_tmp3, q_val;
    float32x4_t q_dst_r, q_dst_i, q_dst2_r, q_dst2_i;
    float32_t *p_src, *p_src2, *p_dst, *p_dst2, *p_twiddles;

    tdc.r = src[0].r;
    tdc.i = src[0].i;

    dst[0].r = tdc.r + tdc.i;
    dst[ncfft].r = tdc.r - tdc.i;
    dst[ncfft].i = dst[0].i = 0;

    if (count >= 4)
    {
        for (k = 1; k <= count ; k += 4)
        {
            p_src  = (float32_t*) (& (src[k]));
            p_src2  = (float32_t*) (& (src[ncfft - k - 3]));
            p_twiddles  = (float32_t*) (& (twiddles[k - 1]));
            p_dst  = (float32_t*) (& (dst[k]));
            p_dst2  = (float32_t*) (& (dst[ncfft - k - 3]));

            q2_fpk  = vld2q_f32 (p_src);
            q2_fpnk = vld2q_f32 (p_src2);
            q2_tw = vld2q_f32 (p_twiddles);
            q2_fpnk.val[0] = vrev64q_f32 (q2_fpnk.val[0]);
            q2_fpnk.val[1] = vrev64q_f32 (q2_fpnk.val[1]);
            q_fpnk_r = vcombine_f32 (vget_high_f32 (q2_fpnk.val[0]), vget_low_f32 (q2_fpnk.val[0]));
            q_fpnk_i = vcombine_f32 (vget_high_f32 (q2_fpnk.val[1]), vget_low_f32 (q2_fpnk.val[1]));
            q_fpnk_i = vnegq_f32 (q_fpnk_i);

            q_f1k_r = vaddq_f32 (q2_fpk.val[0], q_fpnk_r);
            q_f1k_i = vaddq_f32 (q2_fpk.val[1], q_fpnk_i);

            q_f2k_r = vsubq_f32 (q2_fpk.val[0], q_fpnk_r);
            q_f2k_i = vsubq_f32 (q2_fpk.val[1], q_fpnk_i);

            q_tmp0 = vmulq_f32 (q_f2k_r, q2_tw.val[0]);
            q_tmp1 = vmulq_f32 (q_f2k_i, q2_tw.val[1]);
            q_tmp2 = vmulq_f32 (q_f2k_r, q2_tw.val[1]);
            q_tmp3 = vmulq_f32 (q_f2k_i, q2_tw.val[0]);
            q_tw_r = vsubq_f32 (q_tmp0, q_tmp1);
            q_tw_i = vaddq_f32 (q_tmp2, q_tmp3);

            q_val = vdupq_n_f32 (0.5f);
            q_dst2_r = vsubq_f32 (q_f1k_r, q_tw_r);
            q_dst2_i = vsubq_f32 (q_tw_i, q_f1k_i);
            q_dst_r = vaddq_f32 (q_f1k_r, q_tw_r);
            q_dst_i = vaddq_f32 (q_f1k_i, q_tw_i);
            q_dst2_r = vmulq_f32 (q_dst2_r, q_val);
            q_dst2_i = vmulq_f32 (q_dst2_i, q_val);
            q2_dst.val[0] = vmulq_f32 (q_dst_r, q_val);
            q2_dst.val[1] = vmulq_f32 (q_dst_i, q_val);
            q_dst2_r = vrev64q_f32 (q_dst2_r);
            q_dst2_i = vrev64q_f32 (q_dst2_i);
            q2_dst2.val[0] = vcombine_f32 (vget_high_f32 (q_dst2_r), vget_low_f32 (q_dst2_r));
            q2_dst2.val[1] = vcombine_f32 (vget_high_f32 (q_dst2_i), vget_low_f32 (q_dst2_i));
            vst2q_f32 (p_dst, q2_dst);
            vst2q_f32 (p_dst2, q2_dst2);

        }
    }
    else
    {
        for (k = 1; k <= count ; k++)
        {
            fpk    = src[k];
            fpnk.r =   src[ncfft - k].r;
            fpnk.i = - src[ncfft - k].i;

            f1k.r = fpk.r + fpnk.r;
            f1k.i = fpk.i + fpnk.i;

            f2k.r = fpk.r - fpnk.r;
            f2k.i = fpk.i - fpnk.i;

            tw.r = f2k.r * (twiddles[k - 1]).r - f2k.i * (twiddles[k - 1]).i;
            tw.i = f2k.r * (twiddles[k - 1]).i + f2k.i * (twiddles[k - 1]).r;

            dst[k].r = (f1k.r + tw.r) * 0.5f;
            dst[k].i = (f1k.i + tw.i) * 0.5f;
            dst[ncfft - k].r = (f1k.r - tw.r) * 0.5f;
            dst[ncfft - k].i = (tw.i - f1k.i) * 0.5f;
        }
    }
}
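
This forward split mirrors the inverse one above; with the same notation ($W_k$ = twiddles[k - 1]) both branches compute

$$f^1_k = F_k + \overline{F_{N-k}}, \qquad f^2_k = F_k - \overline{F_{N-k}}, \qquad t_k = f^2_k\,W_k,$$
$$\mathrm{dst}_k = \tfrac{1}{2}\bigl(f^1_k + t_k\bigr), \qquad \mathrm{dst}_{N-k} = \tfrac{1}{2}\,\overline{f^1_k - t_k}.$$

Note the twiddle is conjugated in the c2r direction (Example #9) but not here.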
Example #11
static void ne10_fft16_backward_float32_neon (ne10_fft_cpx_float32_t * Fout,
        ne10_fft_cpx_float32_t * Fin,
        ne10_fft_cpx_float32_t * twiddles)
{
    ne10_fft_cpx_float32_t *tw1, *tw2, *tw3;

    // the first stage
    float32_t *p_src0, *p_src4, *p_src8, *p_src12;
    float32x4x2_t q2_in_0123, q2_in_4567, q2_in_89ab, q2_in_cdef;
    float32x4_t q_t0_r,  q_t0_i, q_t1_r,  q_t1_i, q_t2_r,  q_t2_i, q_t3_r, q_t3_i;
    float32x4_t q_out_r048c,  q_out_i048c, q_out_r159d,  q_out_i159d;
    float32x4_t q_out_r26ae,  q_out_i26ae, q_out_r37bf,  q_out_i37bf;
    p_src0 = (float32_t*) (& (Fin[0]));
    p_src4 = (float32_t*) (& (Fin[4]));
    p_src8 = (float32_t*) (& (Fin[8]));
    p_src12 = (float32_t*) (& (Fin[12]));
    q2_in_0123 = vld2q_f32 (p_src0);
    q2_in_4567 = vld2q_f32 (p_src4);
    q2_in_89ab = vld2q_f32 (p_src8);
    q2_in_cdef = vld2q_f32 (p_src12);

    q_t2_r = vsubq_f32 (q2_in_0123.val[0], q2_in_89ab.val[0]);
    q_t2_i = vsubq_f32 (q2_in_0123.val[1], q2_in_89ab.val[1]);
    q_t3_r = vaddq_f32 (q2_in_0123.val[0], q2_in_89ab.val[0]);
    q_t3_i = vaddq_f32 (q2_in_0123.val[1], q2_in_89ab.val[1]);

    q_t0_r = vaddq_f32 (q2_in_4567.val[0], q2_in_cdef.val[0]);
    q_t0_i = vaddq_f32 (q2_in_4567.val[1], q2_in_cdef.val[1]);
    q_t1_r = vsubq_f32 (q2_in_4567.val[0], q2_in_cdef.val[0]);
    q_t1_i = vsubq_f32 (q2_in_4567.val[1], q2_in_cdef.val[1]);

    q_out_r26ae = vsubq_f32 (q_t3_r, q_t0_r);
    q_out_i26ae = vsubq_f32 (q_t3_i, q_t0_i);
    q_out_r048c = vaddq_f32 (q_t3_r, q_t0_r);
    q_out_i048c = vaddq_f32 (q_t3_i, q_t0_i);
    q_out_r159d = vsubq_f32 (q_t2_r, q_t1_i);
    q_out_i159d = vaddq_f32 (q_t2_i, q_t1_r);
    q_out_r37bf = vaddq_f32 (q_t2_r, q_t1_i);
    q_out_i37bf = vsubq_f32 (q_t2_i, q_t1_r);

    // second stages
    float32_t *p_dst0, *p_dst1, *p_dst2, *p_dst3;
    float32_t *p_tw1, *p_tw2, *p_tw3;
    float32x4_t q_s0_r, q_s0_i, q_s1_r, q_s1_i, q_s2_r, q_s2_i;
    float32x4_t q_s3_r, q_s3_i, q_s4_r, q_s4_i, q_s5_r, q_s5_i;
    float32x4x2_t q2_tmp_0, q2_tmp_1, q2_tmp_2, q2_tmp_3;
    float32x4_t q_in_r0123, q_in_r4567, q_in_r89ab, q_in_rcdef;
    float32x4_t q_in_i0123, q_in_i4567, q_in_i89ab, q_in_icdef;
    float32x4x2_t q2_tw1, q2_tw2, q2_tw3;
    float32x4x2_t q2_out_0123, q2_out_4567, q2_out_89ab, q2_out_cdef;
    float32x4_t q_one_by_nfft;
    tw1 = twiddles;
    tw2 = twiddles + 4;
    tw3 = twiddles + 8;
    p_dst0 = (float32_t*) (&Fout[0]);
    p_dst1 = (float32_t*) (&Fout[4]);
    p_dst2 = (float32_t*) (&Fout[8]);
    p_dst3 = (float32_t*) (&Fout[12]);
    p_tw1 = (float32_t*) tw1;
    p_tw2 = (float32_t*) tw2;
    p_tw3 = (float32_t*) tw3;
    q2_tmp_0 = vzipq_f32 (q_out_r048c, q_out_r159d);
    q2_tmp_1 = vzipq_f32 (q_out_i048c, q_out_i159d);
    q2_tmp_2 = vzipq_f32 (q_out_r26ae, q_out_r37bf);
    q2_tmp_3 = vzipq_f32 (q_out_i26ae, q_out_i37bf);
    q_in_r0123 = vcombine_f32 (vget_low_f32 (q2_tmp_0.val[0]), vget_low_f32 (q2_tmp_2.val[0]));
    q_in_i0123 = vcombine_f32 (vget_low_f32 (q2_tmp_1.val[0]), vget_low_f32 (q2_tmp_3.val[0]));
    q_in_r4567 = vcombine_f32 (vget_high_f32 (q2_tmp_0.val[0]), vget_high_f32 (q2_tmp_2.val[0]));
    q_in_i4567 = vcombine_f32 (vget_high_f32 (q2_tmp_1.val[0]), vget_high_f32 (q2_tmp_3.val[0]));
    q_in_r89ab = vcombine_f32 (vget_low_f32 (q2_tmp_0.val[1]), vget_low_f32 (q2_tmp_2.val[1]));
    q_in_i89ab = vcombine_f32 (vget_low_f32 (q2_tmp_1.val[1]), vget_low_f32 (q2_tmp_3.val[1]));
    q_in_rcdef = vcombine_f32 (vget_high_f32 (q2_tmp_0.val[1]), vget_high_f32 (q2_tmp_2.val[1]));
    q_in_icdef = vcombine_f32 (vget_high_f32 (q2_tmp_1.val[1]), vget_high_f32 (q2_tmp_3.val[1]));
    q2_tw1 = vld2q_f32 (p_tw1);
    q2_tw2 = vld2q_f32 (p_tw2);
    q2_tw3 = vld2q_f32 (p_tw3);

    q_s0_r = vmulq_f32 (q_in_r4567, q2_tw1.val[0]);
    q_s0_i = vmulq_f32 (q_in_i4567, q2_tw1.val[0]);
    q_s1_r = vmulq_f32 (q_in_r89ab, q2_tw2.val[0]);
    q_s1_i = vmulq_f32 (q_in_i89ab, q2_tw2.val[0]);
    q_s2_r = vmulq_f32 (q_in_rcdef, q2_tw3.val[0]);
    q_s2_i = vmulq_f32 (q_in_icdef, q2_tw3.val[0]);
    q_s0_r = vmlaq_f32 (q_s0_r, q_in_i4567, q2_tw1.val[1]);
    q_s0_i = vmlsq_f32 (q_s0_i, q_in_r4567, q2_tw1.val[1]);
    q_s1_r = vmlaq_f32 (q_s1_r, q_in_i89ab, q2_tw2.val[1]);
    q_s1_i = vmlsq_f32 (q_s1_i, q_in_r89ab, q2_tw2.val[1]);
    q_s2_r = vmlaq_f32 (q_s2_r, q_in_icdef, q2_tw3.val[1]);
    q_s2_i = vmlsq_f32 (q_s2_i, q_in_rcdef, q2_tw3.val[1]);

    q_s5_r = vsubq_f32 (q_in_r0123, q_s1_r);
    q_s5_i = vsubq_f32 (q_in_i0123, q_s1_i);
    q2_out_0123.val[0] = vaddq_f32 (q_in_r0123, q_s1_r);
    q2_out_0123.val[1] = vaddq_f32 (q_in_i0123, q_s1_i);

    q_s3_r = vaddq_f32 (q_s0_r, q_s2_r);
    q_s3_i = vaddq_f32 (q_s0_i, q_s2_i);
    q_s4_r = vsubq_f32 (q_s0_r, q_s2_r);
    q_s4_i = vsubq_f32 (q_s0_i, q_s2_i);

    q_one_by_nfft = vdupq_n_f32 (0.0625f);
    q2_out_89ab.val[0] = vsubq_f32 (q2_out_0123.val[0], q_s3_r);
    q2_out_89ab.val[1] = vsubq_f32 (q2_out_0123.val[1], q_s3_i);
    q2_out_0123.val[0] = vaddq_f32 (q2_out_0123.val[0], q_s3_r);
    q2_out_0123.val[1] = vaddq_f32 (q2_out_0123.val[1], q_s3_i);

    q2_out_4567.val[0] = vsubq_f32 (q_s5_r, q_s4_i);
    q2_out_4567.val[1] = vaddq_f32 (q_s5_i, q_s4_r);
    q2_out_cdef.val[0] = vaddq_f32 (q_s5_r, q_s4_i);
    q2_out_cdef.val[1] = vsubq_f32 (q_s5_i, q_s4_r);

    q2_out_89ab.val[0] = vmulq_f32 (q2_out_89ab.val[0], q_one_by_nfft);
    q2_out_89ab.val[1] = vmulq_f32 (q2_out_89ab.val[1], q_one_by_nfft);
    q2_out_0123.val[0] = vmulq_f32 (q2_out_0123.val[0], q_one_by_nfft);
    q2_out_0123.val[1] = vmulq_f32 (q2_out_0123.val[1], q_one_by_nfft);
    q2_out_4567.val[0] = vmulq_f32 (q2_out_4567.val[0], q_one_by_nfft);
    q2_out_4567.val[1] = vmulq_f32 (q2_out_4567.val[1], q_one_by_nfft);
    q2_out_cdef.val[0] = vmulq_f32 (q2_out_cdef.val[0], q_one_by_nfft);
    q2_out_cdef.val[1] = vmulq_f32 (q2_out_cdef.val[1], q_one_by_nfft);

    vst2q_f32 (p_dst0, q2_out_0123);
    vst2q_f32 (p_dst1, q2_out_4567);
    vst2q_f32 (p_dst2, q2_out_89ab);
    vst2q_f32 (p_dst3, q2_out_cdef);
}
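
For orientation, the first stage above is a radix-4 butterfly applied to four interleaved columns at once; in scalar complex arithmetic it amounts to the following (the +j/-j signs are the inverse-transform convention, and the final 0.0625f factor is the 1/16 normalization):

// For each column n in 0..3, with in0 = Fin[n], in1 = Fin[n + 4],
// in2 = Fin[n + 8], in3 = Fin[n + 12]:
//   t2 = in0 - in2;    t3 = in0 + in2;
//   t0 = in1 + in3;    t1 = in1 - in3;
//   out0 = t3 + t0;        // -> q_out_r048c / q_out_i048c
//   out2 = t3 - t0;        // -> q_out_r26ae / q_out_i26ae
//   out1 = t2 + j * t1;    // -> q_out_r159d / q_out_i159d
//   out3 = t2 - j * t1;    // -> q_out_r37bf / q_out_i37bf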
Example #12
static void SubbandCoherenceNEON(AecCore* aec,
                                 float efw[2][PART_LEN1],
                                 float xfw[2][PART_LEN1],
                                 float* fft,
                                 float* cohde,
                                 float* cohxd) {
  float dfw[2][PART_LEN1];
  int i;

  if (aec->delayEstCtr == 0)
    aec->delayIdx = PartitionDelay(aec);

  // Use delayed far.
  memcpy(xfw,
         aec->xfwBuf + aec->delayIdx * PART_LEN1,
         sizeof(xfw[0][0]) * 2 * PART_LEN1);

  // Windowed near fft
  WindowData(fft, aec->dBuf);
  aec_rdft_forward_128(fft);
  StoreAsComplex(fft, dfw);

  // Windowed error fft
  WindowData(fft, aec->eBuf);
  aec_rdft_forward_128(fft);
  StoreAsComplex(fft, efw);

  SmoothedPSD(aec, efw, dfw, xfw);

  {
    const float32x4_t vec_1eminus10 = vdupq_n_f32(1e-10f);

    // Subband coherence
    for (i = 0; i + 3 < PART_LEN1; i += 4) {
      const float32x4_t vec_sd = vld1q_f32(&aec->sd[i]);
      const float32x4_t vec_se = vld1q_f32(&aec->se[i]);
      const float32x4_t vec_sx = vld1q_f32(&aec->sx[i]);
      const float32x4_t vec_sdse = vmlaq_f32(vec_1eminus10, vec_sd, vec_se);
      const float32x4_t vec_sdsx = vmlaq_f32(vec_1eminus10, vec_sd, vec_sx);
      float32x4x2_t vec_sde = vld2q_f32(&aec->sde[i][0]);
      float32x4x2_t vec_sxd = vld2q_f32(&aec->sxd[i][0]);
      float32x4_t vec_cohde = vmulq_f32(vec_sde.val[0], vec_sde.val[0]);
      float32x4_t vec_cohxd = vmulq_f32(vec_sxd.val[0], vec_sxd.val[0]);
      vec_cohde = vmlaq_f32(vec_cohde, vec_sde.val[1], vec_sde.val[1]);
      vec_cohde = vdivq_f32(vec_cohde, vec_sdse);
      vec_cohxd = vmlaq_f32(vec_cohxd, vec_sxd.val[1], vec_sxd.val[1]);
      vec_cohxd = vdivq_f32(vec_cohxd, vec_sdsx);

      vst1q_f32(&cohde[i], vec_cohde);
      vst1q_f32(&cohxd[i], vec_cohxd);
    }
  }
  // Scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    cohde[i] =
        (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
        (aec->sd[i] * aec->se[i] + 1e-10f);
    cohxd[i] =
        (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
        (aec->sx[i] * aec->sd[i] + 1e-10f);
  }
}
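
WindowData, PartitionDelay, StoreAsComplex, and aec_rdft_forward_128 are defined elsewhere in the AEC. As context for the two "Windowed ... fft" steps, a plausible shape for WindowData is a square-root-Hanning taper over the two half-frames (a sketch assuming a WebRtcAec_sqrtHanning table of PART_LEN + 1 points; not confirmed by this snippet):

static void WindowData(float* x_windowed, const float* x) {
  int i;
  for (i = 0; i < PART_LEN; i++) {
    x_windowed[i] = x[i] * WebRtcAec_sqrtHanning[i];
    x_windowed[PART_LEN + i] =
        x[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i];
  }
}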