Example #1
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
                                   int16_t gain,
                                   size_t index,
                                   int16_t sign,
                                   int16_t* inputState,
                                   int16_t* outputBuf2,
                                   const int16_t* coefficient,
                                   int16_t* inputBuf,
                                   int16_t* outputBuf,
                                   int* index2) {
  int i = 0, j = 0;  /* Loop counters. */
  int16_t* ubufQQpos2 = &outputBuf2[PITCH_BUFFSIZE - (index + 2)];
  int16_t tmpW16 = 0;

  for (i = 0; i < loopNumber; i++) {
    int32_t tmpW32 = 0;

    /* Filter to get fractional pitch. */
    for (j = 0; j < PITCH_FRACORDER; j++) {
      tmpW32 += ubufQQpos2[*index2 + j] * coefficient[j];
    }

    /* Saturate to avoid overflow in tmpW16. */
    tmpW32 = WEBRTC_SPL_SAT(536862719, tmpW32, -536879104);
    tmpW32 += 8192;
    tmpW16 = (int16_t)(tmpW32 >> 14);

    /* Shift low pass filter state. */
    memmove(&inputState[1], &inputState[0],
            (PITCH_DAMPORDER - 1) * sizeof(int16_t));
    inputState[0] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                      gain, tmpW16, 12);

    /* Low pass filter. */
    tmpW32 = 0;
    /* TODO(kma): Define a static inline function WebRtcSpl_DotProduct()
       in spl_inl.h to replace this and other similar loops. */
    for (j = 0; j < PITCH_DAMPORDER; j++) {
      tmpW32 += inputState[j] * kDampFilter[j];
    }

    /* Saturate to avoid overflow in tmpW16. */
    tmpW32 = WEBRTC_SPL_SAT(1073725439, tmpW32, -1073758208);
    tmpW32 += 16384;
    tmpW16 = (int16_t)(tmpW32 >> 15);

    /* Subtract from input and update buffer. */
    tmpW32 = inputBuf[*index2] - sign * tmpW16;
    outputBuf[*index2] = WebRtcSpl_SatW32ToW16(tmpW32);
    tmpW32 = inputBuf[*index2] + outputBuf[*index2];
    outputBuf2[*index2 + PITCH_BUFFSIZE] = WebRtcSpl_SatW32ToW16(tmpW32);

    (*index2)++;
  }
}
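
The two saturate-then-round steps above look magic, but the bounds are just the int16_t limits pre-scaled for the Q14 rounding shift: 536862719 = 32767 * 16384 + 8191 and -536879104 = -32768 * 16384 - 8192, so adding 8192 and shifting right by 14 can never leave [-32768, 32767]. A minimal scalar sketch of the pattern (an illustrative helper, not a WebRTC API):

#include <stdint.h>

/* Saturate a Q14 accumulator, then round and narrow it to int16_t.
 * The bounds guarantee the +8192 rounding term cannot overflow. */
static int16_t RoundAndNarrowQ14(int32_t acc) {
  if (acc > 536862719) acc = 536862719;     /* 32767 * (1 << 14) + 8191. */
  if (acc < -536879104) acc = -536879104;   /* -32768 * (1 << 14) - 8192. */
  return (int16_t)((acc + 8192) >> 14);
}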
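
The TODO above asks for a shared dot-product helper in spl_inl.h; a sketch of what it could look like (hypothetical name and signature taken from the comment, not an existing API):

#include <stddef.h>
#include <stdint.h>

/* Accumulate sum(a[i] * b[i]) into 32 bits, as the two loops above do. */
static inline int32_t WebRtcSpl_DotProduct(const int16_t* a,
                                           const int16_t* b,
                                           size_t length) {
  int32_t sum = 0;
  size_t i;
  for (i = 0; i < length; i++) {
    sum += a[i] * b[i];
  }
  return sum;
}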
Example #2
// Update the noise estimation information.
static void UpdateNoiseEstimateNeon(NoiseSuppressionFixedC* inst, int offset) {
  const int16_t kExp2Const = 11819; // Q13
  int16_t* ptr_noiseEstLogQuantile = NULL;
  int16_t* ptr_noiseEstQuantile = NULL;
  int16x4_t kExp2Const16x4 = vdup_n_s16(kExp2Const);
  int32x4_t twentyOne32x4 = vdupq_n_s32(21);
  int32x4_t constA32x4 = vdupq_n_s32(0x1fffff);
  int32x4_t constB32x4 = vdupq_n_s32(0x200000);

  int16_t tmp16 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset,
                                        inst->magnLen);

  // Guarantee a Q-domain as high as possible and still fit in int16
  inst->qNoise = 14 - (int) WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2Const,
                                                                 tmp16,
                                                                 21);

  int32x4_t qNoise32x4 = vdupq_n_s32(inst->qNoise);

  for (ptr_noiseEstLogQuantile = &inst->noiseEstLogQuantile[offset],
       ptr_noiseEstQuantile = &inst->noiseEstQuantile[0];
       ptr_noiseEstQuantile < &inst->noiseEstQuantile[inst->magnLen - 3];
       ptr_noiseEstQuantile += 4, ptr_noiseEstLogQuantile += 4) {

    // tmp32no2 = kExp2Const * inst->noiseEstLogQuantile[offset + i];
    int16x4_t v16x4 = vld1_s16(ptr_noiseEstLogQuantile);
    int32x4_t v32x4B = vmull_s16(v16x4, kExp2Const16x4);

    // tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac
    int32x4_t v32x4A = vandq_s32(v32x4B, constA32x4);
    v32x4A = vorrq_s32(v32x4A, constB32x4);

    // tmp16 = (int16_t)(tmp32no2 >> 21);
    v32x4B = vshrq_n_s32(v32x4B, 21);

    // tmp16 -= 21;// shift 21 to get result in Q0
    v32x4B = vsubq_s32(v32x4B, twentyOne32x4);

    // tmp16 += (int16_t) inst->qNoise;
    // shift to get result in Q(qNoise)
    v32x4B = vaddq_s32(v32x4B, qNoise32x4);

    // if (tmp16 < 0) {
    //   tmp32no1 >>= -tmp16;
    // } else {
    //   tmp32no1 <<= tmp16;
    // }
    v32x4B = vshlq_s32(v32x4A, v32x4B);

    // tmp16 = WebRtcSpl_SatW32ToW16(tmp32no1);
    v16x4 = vqmovn_s32(v32x4B);

    //inst->noiseEstQuantile[i] = tmp16;
    vst1_s16(ptr_noiseEstQuantile, v16x4);
  }

  // Last iteration:

  // inst->quantile[i]=exp(inst->lquantile[offset+i]);
  // in Q21
  int32_t tmp32no2 = kExp2Const * *ptr_noiseEstLogQuantile;
  int32_t tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac

  tmp16 = (int16_t)(tmp32no2 >> 21);
  tmp16 -= 21;  // Shift 21 to get result in Q0.
  tmp16 += (int16_t)inst->qNoise;  // Shift to get result in Q(qNoise).
  if (tmp16 < 0) {
    tmp32no1 >>= -tmp16;
  } else {
    tmp32no1 <<= tmp16;
  }

  // inst->noiseEstQuantile[i] = tmp16;
  *ptr_noiseEstQuantile = WebRtcSpl_SatW32ToW16(tmp32no1);
}
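
The vectorized loop above folds the scalar if/else shift into a single vshlq_s32(): NEON's variable shift moves each lane left for a positive per-lane count and right for a negative one. A tiny sketch of that behavior (assumes an ARM target with NEON; the values are arbitrary):

#include <arm_neon.h>

static void VariableShiftDemo(void) {
  /* Each lane holds 2^21; the counts mix right shifts (negative) and
   * left shifts (positive), like tmp16 in the commented scalar code. */
  int32x4_t values = vdupq_n_s32(0x00200000);
  const int32_t counts_c[4] = { -21, -20, 0, 1 };
  int32x4_t counts = vld1q_s32(counts_c);
  int32x4_t shifted = vshlq_s32(values, counts);
  /* shifted is now { 1, 2, 0x200000, 0x400000 }. */
  (void)shifted;
}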
Example #3
void WebRtcIsacfix_GetVars(const WebRtc_Word16 *input, const WebRtc_Word16 *pitchGains_Q12,
                           WebRtc_UWord32 *oldEnergy, WebRtc_Word16 *varscale)
{
  int k;
  WebRtc_UWord32 nrgQ[4];
  WebRtc_Word16 nrgQlog[4];
  WebRtc_Word16 tmp16, chng1, chng2, chng3, chng4, tmp, chngQ, oldNrgQlog, pgQ, pg3;
  WebRtc_Word32 expPg32;
  WebRtc_Word16 expPg, divVal;
  WebRtc_Word16 tmp16_1, tmp16_2;

  /* Calculate the energies of the four frame quarters */
  nrgQ[0] = 0;
  for (k = QLOOKAHEAD / 2; k < (FRAMESAMPLES / 4 + QLOOKAHEAD) / 2; k++) {
    nrgQ[0] += WEBRTC_SPL_MUL_16_16(input[k], input[k]);
  }
  nrgQ[1] = 0;
  for ( ; k < (FRAMESAMPLES / 2 + QLOOKAHEAD) / 2; k++) {
    nrgQ[1] += WEBRTC_SPL_MUL_16_16(input[k], input[k]);
  }
  nrgQ[2] = 0;
  for ( ; k < (WEBRTC_SPL_MUL_16_16(FRAMESAMPLES, 3) / 4 + QLOOKAHEAD) / 2; k++) {
    nrgQ[2] += WEBRTC_SPL_MUL_16_16(input[k], input[k]);
  }
  nrgQ[3] = 0;
  for ( ; k < (FRAMESAMPLES + QLOOKAHEAD) / 2; k++) {
    nrgQ[3] += WEBRTC_SPL_MUL_16_16(input[k], input[k]);
  }

  for (k = 0; k < 4; k++) {
    nrgQlog[k] = (WebRtc_Word16)log2_Q8_LPC(nrgQ[k]); /* log2(nrgQ) */
  }
  oldNrgQlog = (WebRtc_Word16)log2_Q8_LPC(*oldEnergy);

  /* Calculate average level change */
  chng1 = WEBRTC_SPL_ABS_W16(nrgQlog[3] - nrgQlog[2]);
  chng2 = WEBRTC_SPL_ABS_W16(nrgQlog[2] - nrgQlog[1]);
  chng3 = WEBRTC_SPL_ABS_W16(nrgQlog[1] - nrgQlog[0]);
  chng4 = WEBRTC_SPL_ABS_W16(nrgQlog[0] - oldNrgQlog);
  tmp = chng1 + chng2 + chng3 + chng4;
  chngQ = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp, kChngFactor, 10); /* Q12 */
  chngQ += 2926; /* + 1.0/1.4 in Q12 */

  /* Find average pitch gain */
  pgQ = 0;
  for (k = 0; k < 4; k++) {
    pgQ += pitchGains_Q12[k];
  }

  pg3 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(pgQ, pgQ, 11); /* pgQ in Q(12+2)=Q14. Q14*Q14>>11 => Q17 */
  pg3 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(pgQ, pg3, 13); /* Q17*Q14>>13 => Q18 */
  pg3 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(pg3, kMulPitchGain, 5); /* Q10. kMulPitchGain = -25 = -200 in Q-3. */

  tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2, pg3, 13); /* Q13*Q10>>13 => Q10 */
  if (tmp16 < 0) {
    tmp16_2 = (0x0400 | (tmp16 & 0x03FF));
    tmp16_1 = (WEBRTC_SPL_RSHIFT_W16((WebRtc_UWord16)(tmp16 ^ 0xFFFF), 10) - 3); /* Gives result in Q14 */
    if (tmp16_1 < 0)
      expPg = (WebRtc_Word16)-WEBRTC_SPL_LSHIFT_W16(tmp16_2, -tmp16_1);
    else
      expPg = (WebRtc_Word16)-WEBRTC_SPL_RSHIFT_W16(tmp16_2, tmp16_1);
  } else {
    expPg = (WebRtc_Word16)-16384; /* 1 in Q14, since 2^0=1 */
  }

  expPg32 = (WebRtc_Word32)WEBRTC_SPL_LSHIFT_W16((WebRtc_Word32)expPg, 8); /* Q22 */
  divVal = WebRtcSpl_DivW32W16ResW16(expPg32, chngQ); /* Q22/Q12=Q10 */

  tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2, divVal, 13); /* Q13*Q10>>13 => Q10 */
  if (tmp16 < 0) {
    tmp16_2 = (0x0400 | (tmp16 & 0x03FF));
    tmp16_1 = (WEBRTC_SPL_RSHIFT_W16((WebRtc_UWord16)(tmp16 ^ 0xFFFF), 10) - 3); /* Gives result in Q14 */
    if (tmp16_1 < 0)
      expPg = (WebRtc_Word16)WEBRTC_SPL_LSHIFT_W16(tmp16_2, -tmp16_1);
    else
      expPg = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W16(tmp16_2, tmp16_1);
  } else {
    expPg = (WebRtc_Word16)16384; /* 1 in Q14, since 2^0=1 */
  }

  *varscale = expPg - 1;
  *oldEnergy = nrgQ[3];
}
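
Both exponential blocks above compute 2^x for a Q10 exponent with the same mantissa trick: 0x0400 | (x & 0x03FF) forms 1.frac in Q10, a linear approximation of 2^frac, and the integer part of the exponent becomes a shift (rebased by 3 so the result lands in Q14). A hedged scalar sketch of the negative-exponent path (an illustrative helper, not a library function; the first block above negates the result, the second uses it directly):

#include <stdint.h>

static int16_t Exp2Q10ToQ14(int16_t x_q10) {
  if (x_q10 >= 0)
    return 16384;  /* As above: treat any non-negative input as 2^0 = 1. */
  /* 1.frac in Q10, a linear approximation of 2^frac. */
  int16_t mant = (int16_t)(0x0400 | (x_q10 & 0x03FF));
  /* Integer part of -x via one's complement; -3 rebases Q10 to Q14. */
  int16_t shift = (int16_t)(((uint16_t)(x_q10 ^ 0xFFFF) >> 10) - 3);
  return (shift < 0) ? (int16_t)(mant << -shift) : (int16_t)(mant >> shift);
}

For example, x_q10 = -1024 (i.e. -1.0) gives mant = 1024 and shift = -3, so the result is 8192 = 0.5 in Q14, as expected for 2^-1.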
Example #4
void WebRtcIsacfix_PitchFilter(int16_t* indatQQ,  // Q10 if type is 1 or 4,
                                                  // Q0 if type is 2.
                               int16_t* outdatQQ,
                               PitchFiltstr* pfp,
                               int16_t* lagsQ7,
                               int16_t* gainsQ12,
                               int16_t type) {
  int    k, ind, cnt;
  int16_t sign = 1;
  int16_t inystateQQ[PITCH_DAMPORDER];
  int16_t ubufQQ[PITCH_INTBUFFSIZE + QLOOKAHEAD];
  const int16_t Gain = 21299;     // 1.3 in Q14
  int16_t oldLagQ7;
  int16_t oldGainQ12, lagdeltaQ7, curLagQ7, gaindeltaQ12, curGainQ12;
  int indW32 = 0, frcQQ = 0;
  int32_t tmpW32;
  const int16_t* fracoeffQQ = NULL;

  // Assumptions in ARM assembly for WebRtcIsacfix_PitchFilterCoreARM().
  COMPILE_ASSERT(PITCH_FRACORDER == 9);
  COMPILE_ASSERT(PITCH_DAMPORDER == 5);

  // Set up buffer and states.
  memcpy(ubufQQ, pfp->ubufQQ, sizeof(pfp->ubufQQ));
  memcpy(inystateQQ, pfp->ystateQQ, sizeof(inystateQQ));

  // Get old lag and gain value from memory.
  oldLagQ7 = pfp->oldlagQ7;
  oldGainQ12 = pfp->oldgainQ12;

  if (type == 4) {
    sign = -1;

    // Make output more periodic.
    for (k = 0; k < PITCH_SUBFRAMES; k++) {
      gainsQ12[k] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
          gainsQ12[k], Gain, 14);
    }
  }

  // No interpolation if pitch lag step is big.
  if ((WEBRTC_SPL_MUL_16_16_RSFT(lagsQ7[0], 3, 1) < oldLagQ7) ||
      (lagsQ7[0] > WEBRTC_SPL_MUL_16_16_RSFT(oldLagQ7, 3, 1))) {
    oldLagQ7 = lagsQ7[0];
    oldGainQ12 = gainsQ12[0];
  }

  ind = 0;

  for (k = 0; k < PITCH_SUBFRAMES; k++) {
    // Calculate interpolation steps.
    lagdeltaQ7 = lagsQ7[k] - oldLagQ7;
    lagdeltaQ7 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                  lagdeltaQ7, kDivFactor, 15);
    curLagQ7 = oldLagQ7;
    gaindeltaQ12 = gainsQ12[k] - oldGainQ12;
    gaindeltaQ12 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
                    gaindeltaQ12, kDivFactor, 15);

    curGainQ12 = oldGainQ12;
    oldLagQ7 = lagsQ7[k];
    oldGainQ12 = gainsQ12[k];

    // Each frame has 4 60-sample pitch subframes, and each subframe has 5
    // 12-sample segments. Each segment needs to be processed with
    // newly-updated parameters, so we break the pitch filtering into
    // two for-loops (5 x 12) below. It's also why kDivFactor = 0.2 (in Q15).
    for (cnt = 0; cnt < kSegments; cnt++) {
      // Update parameters for each segment.
      curGainQ12 += gaindeltaQ12;
      curLagQ7 += lagdeltaQ7;
      indW32 = CalcLrIntQ(curLagQ7, 7);
      tmpW32 = WEBRTC_SPL_LSHIFT_W32(indW32, 7);
      tmpW32 -= curLagQ7;
      frcQQ = WEBRTC_SPL_RSHIFT_W32(tmpW32, 4);
      frcQQ += 4;

      if (frcQQ == PITCH_FRACS) {
        frcQQ = 0;
      }
      fracoeffQQ = kIntrpCoef[frcQQ];

      // Pitch filtering.
      WebRtcIsacfix_PitchFilterCore(PITCH_SUBFRAME_LEN / kSegments, curGainQ12,
                                    indW32, sign, inystateQQ, ubufQQ,
                                    fracoeffQQ, indatQQ, outdatQQ, &ind);
    }
  }

  // Export buffer and states.
  memcpy(pfp->ubufQQ, ubufQQ + PITCH_FRAME_LEN, sizeof(pfp->ubufQQ));
  memcpy(pfp->ystateQQ, inystateQQ, sizeof(pfp->ystateQQ));

  pfp->oldlagQ7 = oldLagQ7;
  pfp->oldgainQ12 = oldGainQ12;

  if (type == 2) {
    // Filter look-ahead segment.
    WebRtcIsacfix_PitchFilterCore(QLOOKAHEAD, curGainQ12, indW32, 1, inystateQQ,
                                  ubufQQ, fracoeffQQ, indatQQ, outdatQQ, &ind);
  }
}
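
The interpolation above moves lag and gain from their old values to the new subframe targets in kSegments equal steps, which is why the comment fixes kDivFactor at 0.2 in Q15. A minimal sketch of that stepping, assuming kDivFactor = 6554 (0.2 in Q15, per the comment; the constant's definition is outside this snippet) and five segments:

#include <stdint.h>

static void InterpolateQ12(int16_t old_q12, int16_t new_q12, int16_t out[5]) {
  const int16_t kDivFactorQ15 = 6554;  /* 0.2 in Q15 (assumed). */
  int16_t delta = (int16_t)(((new_q12 - old_q12) * kDivFactorQ15) >> 15);
  int16_t cur = old_q12;
  int i;
  for (i = 0; i < 5; i++) {
    cur += delta;  /* After five steps cur has (nearly) reached new_q12. */
    out[i] = cur;
  }
}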
Example #5
void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
                                    PitchFiltstr* pfp,
                                    int16_t* lagsQ7,
                                    int16_t* gainsQ12) {
  int  k, n, m, ind, pos, pos3QQ;

  int16_t ubufQQ[PITCH_INTBUFFSIZE];
  int16_t oldLagQ7, lagdeltaQ7, curLagQ7;
  const int16_t* fracoeffQQ = NULL;
  int16_t scale;
  int16_t cnt = 0, frcQQ, indW16 = 0, tmpW16;
  int32_t tmpW32, tmp2W32, csum1QQ, esumxQQ;

  // Set up buffer and states.
  memcpy(ubufQQ, pfp->ubufQQ, sizeof(pfp->ubufQQ));
  oldLagQ7 = pfp->oldlagQ7;

  // No interpolation if pitch lag step is big.
  if ((WEBRTC_SPL_MUL_16_16_RSFT(lagsQ7[0], 3, 1) < oldLagQ7) ||
      (lagsQ7[0] > WEBRTC_SPL_MUL_16_16_RSFT(oldLagQ7, 3, 1))) {
    oldLagQ7 = lagsQ7[0];
  }

  ind = 0;
  pos = ind + PITCH_BUFFSIZE;
  scale = 0;
  for (k = 0; k < PITCH_SUBFRAMES; k++) {

    // Calculate interpolation steps.
    lagdeltaQ7 = lagsQ7[k] - oldLagQ7;
    lagdeltaQ7 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                   lagdeltaQ7, kDivFactor, 15);
    curLagQ7 = oldLagQ7;
    oldLagQ7 = lagsQ7[k];

    csum1QQ = 1;
    esumxQQ = 1;

    // Same as function WebRtcIsacfix_PitchFilter(), we break the pitch
    // filtering into two for-loops (5 x 12) below.
    for (cnt = 0; cnt < kSegments; cnt++) {
      // Update parameters for each segment.
      curLagQ7 += lagdeltaQ7;
      indW16 = (int16_t)CalcLrIntQ(curLagQ7, 7);
      tmpW16 = WEBRTC_SPL_LSHIFT_W16(indW16, 7);
      tmpW16 -= curLagQ7;
      frcQQ = WEBRTC_SPL_RSHIFT_W16(tmpW16, 4);
      frcQQ += 4;

      if (frcQQ == PITCH_FRACS) {
        frcQQ = 0;
      }
      fracoeffQQ = kIntrpCoef[frcQQ];

      pos3QQ = pos - (indW16 + 4);

      for (n = 0; n < PITCH_SUBFRAME_LEN / kSegments; n++) {
        // Filter to get fractional pitch.

        tmpW32 = 0;
        for (m = 0; m < PITCH_FRACORDER; m++) {
          tmpW32 += WEBRTC_SPL_MUL_16_16(ubufQQ[pos3QQ + m], fracoeffQQ[m]);
        }

        // Subtract from input and update buffer.
        ubufQQ[pos] = indatQ0[ind];

        tmp2W32 = WEBRTC_SPL_MUL_16_32_RSFT14(indatQ0[ind], tmpW32);
        tmpW32 += 8192;
        tmpW16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmpW32, 14);
        tmpW32 = WEBRTC_SPL_MUL_16_16(tmpW16, tmpW16);

        if ((tmp2W32 > 1073700000) || (csum1QQ > 1073700000) ||
            (tmpW32 > 1073700000) || (esumxQQ > 1073700000)) {  // 2^30
          scale++;
          csum1QQ = WEBRTC_SPL_RSHIFT_W32(csum1QQ, 1);
          esumxQQ = WEBRTC_SPL_RSHIFT_W32(esumxQQ, 1);
        }
        tmp2W32 = WEBRTC_SPL_RSHIFT_W32(tmp2W32, scale);
        csum1QQ += tmp2W32;
        tmpW32 = WEBRTC_SPL_RSHIFT_W32(tmpW32, scale);
        esumxQQ += tmpW32;

        ind++;
        pos++;
        pos3QQ++;
      }
    }

    if (csum1QQ < esumxQQ) {
      tmp2W32 = WebRtcSpl_DivResultInQ31(csum1QQ, esumxQQ);

      // Gain should be half the correlation.
      tmpW32 = WEBRTC_SPL_RSHIFT_W32(tmp2W32, 20);
    } else {
      tmpW32 = 4096;
    }
    gainsQ12[k] = (int16_t)WEBRTC_SPL_SAT(PITCH_MAX_GAIN_Q12, tmpW32, 0);
  }

  // Export buffer and states.
  memcpy(pfp->ubufQQ, ubufQQ + PITCH_FRAME_LEN, sizeof(pfp->ubufQQ));
  pfp->oldlagQ7 = lagsQ7[PITCH_SUBFRAMES - 1];
  pfp->oldgainQ12 = gainsQ12[PITCH_SUBFRAMES - 1];

}
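
The gain written above is deliberately half of the normalized correlation csum1QQ / esumxQQ: the division produces a Q31 ratio, converting Q31 to Q12 would be a right shift by 19, and the code shifts by 20 instead. A compact sketch of that arithmetic with the Q31 division inlined via 64-bit math (illustrative only; the real code also saturates the result to [0, PITCH_MAX_GAIN_Q12]):

#include <stdint.h>

static int16_t GainFromSums(int32_t csum_q, int32_t esum_q) {
  if (csum_q >= esum_q)
    return 4096;  /* 1.0 in Q12, as in the else-branch above. */
  /* (csum / esum) in Q31, what WebRtcSpl_DivResultInQ31() returns. */
  int32_t ratio_q31 = (int32_t)(((int64_t)csum_q << 31) / esum_q);
  /* Q31 -> Q12 would be >> 19; one extra bit halves the correlation. */
  return (int16_t)(ratio_q31 >> 20);
}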
Example #6
// Compute the speech/noise probability.
// The non-speech probability is returned in nonSpeechProbFinal.
// priorLocSnr is the prior SNR for each frequency (in Q11).
// postLocSnr is the post SNR for each frequency (in Q11).
void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
                               uint16_t* nonSpeechProbFinal,
                               uint32_t* priorLocSnr,
                               uint32_t* postLocSnr) {

  uint32_t zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3;
  int32_t invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32;
  int32_t frac32, logTmp;
  int32_t logLrtTimeAvgKsumFX;
  int16_t indPriorFX16;
  int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
  int i, normTmp, normTmp2, nShifts;

  // compute feature based on average LR factor
  // this is the average over all frequencies of the smooth log LRT
  logLrtTimeAvgKsumFX = 0;
  for (i = 0; i < inst->magnLen; i++) {
    besselTmpFX32 = (int32_t)postLocSnr[i]; // Q11
    normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
    num = WEBRTC_SPL_LSHIFT_U32(postLocSnr[i], normTmp); // Q(11+normTmp)
    if (normTmp > 10) {
      den = WEBRTC_SPL_LSHIFT_U32(priorLocSnr[i], normTmp - 11); // Q(normTmp)
    } else {
      den = WEBRTC_SPL_RSHIFT_U32(priorLocSnr[i], 11 - normTmp); // Q(normTmp)
    }
    if (den > 0) {
      besselTmpFX32 -= WEBRTC_SPL_UDIV(num, den); // Q11
    } else {
      besselTmpFX32 -= num; // Q11
    }

    // inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - log(snrLocPrior)
    //                                       - inst->logLrtTimeAvg[i]);
    // Here, LRT_TAVG = 0.5
    zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
    frac32 = (int32_t)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
    tmp32 = WEBRTC_SPL_MUL(frac32, frac32);
    tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(tmp32, -43), 19);
    tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)frac32, 5412, 12);
    frac32 = tmp32 + 37;
    // tmp32 = log2(priorLocSnr[i])
    tmp32 = (int32_t)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12
    logTmp = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(tmp32, 178), 8);
                                                  // log2(priorLocSnr[i])*log(2)
    tmp32no1 = WEBRTC_SPL_RSHIFT_W32(logTmp + inst->logLrtTimeAvgW32[i], 1);
                                                  // Q12
    inst->logLrtTimeAvgW32[i] += (besselTmpFX32 - tmp32no1); // Q12

    logLrtTimeAvgKsumFX += inst->logLrtTimeAvgW32[i]; // Q12
  }
  inst->featureLogLrt = WEBRTC_SPL_RSHIFT_W32(logLrtTimeAvgKsumFX * 5,
                                              inst->stages + 10);
                                                  // 5 = BIN_SIZE_LRT / 2
  // done with computation of LR factor

  //
  // Compute the indicator functions.
  //

  // average LRT feature
  // FLOAT code
  // indicator0 = 0.5 * (tanh(widthPrior *
  //                      (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
  tmpIndFX = 16384; // Q14(1.0)
  tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt; // Q12
  nShifts = 7 - inst->stages; // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
  //use larger width in tanh map for pause regions
  if (tmp32no1 < 0) {
    tmpIndFX = 0;
    tmp32no1 = -tmp32no1;
    //widthPrior = widthPrior * 2.0;
    nShifts++;
  }
  tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14
  // compute indicator function: sigmoid map
  tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 14);
  if ((tableIndex < 16) && (tableIndex >= 0)) {
    tmp16no2 = kIndicatorTable[tableIndex];
    tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
    frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14
    tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
    if (tmpIndFX == 0) {
      tmpIndFX = 8192 - tmp16no2; // Q14
    } else {
      tmpIndFX = 8192 + tmp16no2; // Q14
    }
  }
  indPriorFX = WEBRTC_SPL_MUL_16_16(inst->weightLogLrt, tmpIndFX); // 6*Q14

  //spectral flatness feature
  if (inst->weightSpecFlat) {
    tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400); // Q10
    tmpIndFX = 16384; // Q14(1.0)
    //use larger width in tanh map for pause regions
    tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1; //Q10
    nShifts = 4;
    if (inst->thresholdSpecFlat < tmpU32no1) {
      tmpIndFX = 0;
      tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
      //widthPrior = widthPrior * 2.0;
      nShifts++;
    }
    tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts),
                                    25); // Q14
    // compute indicator function: sigmoid map
    // FLOAT code
    // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
    //                          (threshPrior1 - tmpFloat1)) + 1.0);
    tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
    if (tableIndex < 16) {
      tmp16no2 = kIndicatorTable[tableIndex];
      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
      frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
      tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
      if (tmpIndFX) {
        tmpIndFX = 8192 + tmp16no2; // Q14
      } else {
        tmpIndFX = 8192 - tmp16no2; // Q14
      }
    }
    indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecFlat, tmpIndFX); // 6*Q14
  }

  //for template spectral-difference
  if (inst->weightSpecDiff) {
    tmpU32no1 = 0;
    if (inst->featureSpecDiff) {
      normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
                               WebRtcSpl_NormU32(inst->featureSpecDiff));
      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp);
                                                         // Q(normTmp-2*stages)
      tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy,
                                        20 - inst->stages - normTmp);
      if (tmpU32no2 > 0) {
        // Q(20 - inst->stages)
        tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2);
      } else {
        tmpU32no1 = (uint32_t)(0x7fffffff);
      }
    }
    tmpU32no3 = WEBRTC_SPL_UDIV(WEBRTC_SPL_LSHIFT_U32(inst->thresholdSpecDiff,
                                                      17),
                                25);
    tmpU32no2 = tmpU32no1 - tmpU32no3;
    nShifts = 1;
    tmpIndFX = 16384; // Q14(1.0)
    //use larger width in tanh map for pause regions
    if (tmpU32no2 & 0x80000000) {
      tmpIndFX = 0;
      tmpU32no2 = tmpU32no3 - tmpU32no1;
      //widthPrior = widthPrior * 2.0;
      nShifts--;
    }
    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, nShifts);
    // compute indicator function: sigmoid map
    /* FLOAT code
     indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
     */
    tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
    if (tableIndex < 16) {
      tmp16no2 = kIndicatorTable[tableIndex];
      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
      frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
      tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                    tmp16no1, frac, 14);
      if (tmpIndFX) {
        tmpIndFX = 8192 + tmp16no2;
      } else {
        tmpIndFX = 8192 - tmp16no2;
      }
    }
    indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecDiff, tmpIndFX); // 6*Q14
  }

  //combine the indicator function with the feature weights
  // FLOAT code
  // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 *
  //                 indicator1 + weightIndPrior2 * indicator2);
  indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6); // Q14
  // done with computing indicator function

  //compute the prior probability
  // FLOAT code
  // inst->priorNonSpeechProb += PRIOR_UPDATE *
  //                             (indPriorNonSpeech - inst->priorNonSpeechProb);
  tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
  inst->priorNonSpeechProb += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
                                PRIOR_UPDATE_Q14, tmp16, 14); // Q14

  //final speech probability: combine prior model with LR factor:

  memset(nonSpeechProbFinal, 0, sizeof(uint16_t) * inst->magnLen);

  if (inst->priorNonSpeechProb > 0) {
    for (i = 0; i < inst->magnLen; i++) {
      // FLOAT code
      // invLrt = exp(inst->logLrtTimeAvg[i]);
      // invLrt = inst->priorSpeechProb * invLrt;
      // nonSpeechProbFinal[i] = (1.0 - inst->priorSpeechProb) /
      //                         (1.0 - inst->priorSpeechProb + invLrt);
      // invLrt = (1.0 - inst->priorNonSpeechProb) * invLrt;
      // nonSpeechProbFinal[i] = inst->priorNonSpeechProb /
      //                         (inst->priorNonSpeechProb + invLrt);
      if (inst->logLrtTimeAvgW32[i] < 65300) {
        tmp32no1 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(
                                           inst->logLrtTimeAvgW32[i], 23637),
                                         14); // Q12
        intPart = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 12);
        if (intPart < -8) {
          intPart = -8;
        }
        frac = (int16_t)(tmp32no1 & 0x00000fff); // Q12

        // Quadratic approximation of 2^frac
        tmp32no2 = WEBRTC_SPL_RSHIFT_W32(frac * frac * 44, 19); // Q12
        tmp32no2 += WEBRTC_SPL_MUL_16_16_RSFT(frac, 84, 7); // Q12
        invLrtFX = WEBRTC_SPL_LSHIFT_W32(1, 8 + intPart)
                   + WEBRTC_SPL_SHIFT_W32(tmp32no2, intPart - 4); // Q8

        normTmp = WebRtcSpl_NormW32(invLrtFX);
        normTmp2 = WebRtcSpl_NormW16((16384 - inst->priorNonSpeechProb));
        if (normTmp + normTmp2 >= 7) {
          if (normTmp + normTmp2 < 15) {
            invLrtFX = WEBRTC_SPL_RSHIFT_W32(invLrtFX, 15 - normTmp2 - normTmp);
            // Q(normTmp+normTmp2-7)
            tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX,
                                            (16384 - inst->priorNonSpeechProb));
            // Q(normTmp+normTmp2+7)
            invLrtFX = WEBRTC_SPL_SHIFT_W32(tmp32no1, 7 - normTmp - normTmp2);
                                                                  // Q14
          } else {
            tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX,
                                            (16384 - inst->priorNonSpeechProb));
                                                                  // Q22
            invLrtFX = WEBRTC_SPL_RSHIFT_W32(tmp32no1, 8); // Q14
          }

          tmp32no1 = WEBRTC_SPL_LSHIFT_W32((int32_t)inst->priorNonSpeechProb,
                                           8); // Q22

          nonSpeechProbFinal[i] = (uint16_t)WEBRTC_SPL_DIV(tmp32no1,
              (int32_t)inst->priorNonSpeechProb + invLrtFX); // Q8
        }
      }
    }
  }
}
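
The LRT loop above takes log2(priorLocSnr[i]) inline: normalize the value, pull the next 12 bits as a Q12 fraction f, and evaluate a quadratic fit of log2(1 + f), roughly -0.336 f^2 + 1.321 f + 0.009 (the constants -43, 5412 and 37). A standalone sketch, with __builtin_clz (GCC/Clang) standing in for WebRtcSpl_NormU32 and a nonzero Q0 input assumed; the loop itself also subtracts 11 << 12 to undo the Q11 input scaling:

#include <stdint.h>

static int32_t Log2Q12(uint32_t x) {
  int zeros = __builtin_clz(x);  /* x != 0 assumed. */
  int32_t frac = (int32_t)(((x << zeros) & 0x7FFFFFFF) >> 19);  /* f, Q12. */
  int32_t poly = (frac * frac * -43) >> 19;  /* ~ -0.336 * f^2, Q12. */
  poly += (frac * 5412) >> 12;               /* ~ +1.321 * f, Q12. */
  poly += 37;                                /* Small offset from the fit. */
  return ((31 - zeros) << 12) + poly;        /* Integer part plus fraction. */
}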
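
The final loop inverts that logarithm with a quadratic 2^frac approximation: in Q12, 2^f - 1 is approximated by 0.34375 * f^2 + 0.65625 * f (the constants 44 and 84 above). The coefficients sum to 1, so the fit is exact at f = 0 and f = 1, and the integer part of the exponent is applied afterwards as a plain shift. A standalone sketch of just the fractional step:

#include <stdint.h>

static int32_t Exp2FracQ12(int16_t frac_q12) {
  int32_t sq = ((int32_t)frac_q12 * frac_q12 * 44) >> 19;  /* 0.34375*f^2 */
  int32_t lin = ((int32_t)frac_q12 * 84) >> 7;             /* 0.65625*f   */
  return 4096 + sq + lin;  /* 2^f in Q12; equals 8192 (2.0) at f = 1.0. */
}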