Example #1
/* 1D parabolic interpolation. All input and output values are in Q8. */
static __inline void Intrp1DQ8(int32_t *x, int32_t *fx, int32_t *y, int32_t *fy) {

  int16_t sign1=1, sign2=1;
  int32_t r32, q32, t32, nom32, den32;
  int16_t t16, tmp16, tmp16_1;

  if ((fx[0]>0) && (fx[2]>0)) {
    r32=fx[1]-fx[2];
    q32=fx[0]-fx[1];
    nom32=q32+r32;
    den32=WEBRTC_SPL_MUL_32_16((q32-r32), 2);
    if (nom32<0)
      sign1=-1;
    if (den32<0)
      sign2=-1;

    /* t = (q32+r32)/(2*(q32-r32)) = ((fx[0]-fx[1]) + (fx[1]-fx[2])) / (2 * ((fx[0]-fx[1]) - (fx[1]-fx[2]))) */
    /* (Signs are removed because WebRtcSpl_DivResultInQ31 can't handle negative numbers) */
    t32=WebRtcSpl_DivResultInQ31(WEBRTC_SPL_MUL_32_16(nom32, sign1),WEBRTC_SPL_MUL_32_16(den32, sign2)); /* t in Q31, without signs */

    t16=(int16_t)WEBRTC_SPL_RSHIFT_W32(t32, 23);  /* Q8 */
    t16=t16*sign1*sign2;        /* t in Q8 with signs */

    *y = x[0]+t16;          /* Q8 */
    // *y = x[1]+t16;          /* Q8 */

    /* The following code calculates fy in three steps */
    /* fy = 0.5 * t * (t-1) * fx[0] + (1-t*t) * fx[1] + 0.5 * t * (t+1) * fx[2]; */

    /* Part I: 0.5 * t * (t-1) * fx[0] */
    tmp16_1=(int16_t)WEBRTC_SPL_MUL_16_16(t16,t16); /* Q8*Q8=Q16 */
    tmp16_1 = WEBRTC_SPL_RSHIFT_W16(tmp16_1,2);  /* Q16>>2 = Q14 */
    t16 = (int16_t)WEBRTC_SPL_MUL_16_16(t16, 64);           /* Q8<<6 = Q14  */
    tmp16 = tmp16_1-t16;
    *fy = WEBRTC_SPL_MUL_16_32_RSFT15(tmp16, fx[0]); /* (Q14 * Q8 >>15)/2 = Q8 */

    /* Part II: (1-t*t) * fx[1] */
    tmp16 = 16384-tmp16_1;        /* 1 in Q14 - Q14 */
    *fy += WEBRTC_SPL_MUL_16_32_RSFT14(tmp16, fx[1]);/* Q14 * Q8 >> 14 = Q8 */

    /* Part III: 0.5 * t * (t+1) * fx[2] */
    tmp16 = tmp16_1+t16;
    *fy += WEBRTC_SPL_MUL_16_32_RSFT15(tmp16, fx[2]);/* (Q14 * Q8 >>15)/2 = Q8 */
  } else {
    *y = x[0];
    *fy= fx[1];
  }
}
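For reference, a minimal floating-point sketch of the same parabolic fit can be used to sanity-check the Q8 arithmetic. The helper name and test values below are made up for illustration; it places the vertex at x[1] + t, following the formula in the in-code comment, whereas the listing adds the offset to x[0] (with the x[1] variant left commented out).

#include <stdio.h>

/* Floating-point reference for the parabolic fit above (illustrative only). */
static void Intrp1DRef(const double *x, const double *fx,
                       double *y, double *fy) {
  double q = fx[0] - fx[1];             /* left slope  */
  double r = fx[1] - fx[2];             /* right slope */
  double den = 2.0 * (q - r);
  if (den == 0.0) {                     /* collinear points: no vertex */
    *y = x[1];
    *fy = fx[1];
    return;
  }
  double t = (q + r) / den;             /* vertex offset from the middle sample */
  *y = x[1] + t;
  /* Same Lagrange evaluation as Parts I-III in the fixed-point code. */
  *fy = 0.5 * t * (t - 1.0) * fx[0]
      + (1.0 - t * t) * fx[1]
      + 0.5 * t * (t + 1.0) * fx[2];
}

int main(void) {
  const double x[3]  = {10.0, 11.0, 12.0};
  const double fx[3] = {1.0, 4.0, 3.0};
  double y, fy;
  Intrp1DRef(x, fx, &y, &fy);
  printf("peak at %.3f, value %.3f\n", y, fy);  /* 11.250, 4.125 */
  return 0;
}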
Example #2
File: recin.c  Project: CryptoCall/webrtc
WebRtc_UWord32 WebRtcNetEQ_ScaleTimestampInternalToExternal(const MCUInst_t *MCU_inst,
                                                            WebRtc_UWord32 internalTS)
{
    WebRtc_Word32 timestampDiff;
    WebRtc_UWord32 externalTS;

    /* difference between this and last incoming timestamp */
    timestampDiff = (WebRtc_Word32) internalTS - MCU_inst->internalTS;

    switch (MCU_inst->scalingFactor)
    {
        case kTSscalingTwo:
        {
            /* divide by 2 */
            timestampDiff = WEBRTC_SPL_RSHIFT_W32(timestampDiff, 1);
            break;
        }
        case kTSscalingTwoThirds:
        {
            /* multiply by 3/2 */
            timestampDiff = WEBRTC_SPL_MUL_32_16(timestampDiff, 3);
            timestampDiff = WEBRTC_SPL_RSHIFT_W32(timestampDiff, 1);
            break;
        }
        case kTSscalingFourThirds:
        {
            /* multiply by 3/4 */
            timestampDiff = WEBRTC_SPL_MUL_32_16(timestampDiff, 3);
            timestampDiff = WEBRTC_SPL_RSHIFT_W32(timestampDiff, 2);
            break;
        }
        default:
        {
            /* no scaling */
        }
    }

    /* add the scaled difference to last scaled timestamp and save ... */
    externalTS = MCU_inst->externalTS + timestampDiff;

    return externalTS;
}
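A stand-alone mirror of the scaling arithmetic, using plain shifts and multiplies instead of the WEBRTC_SPL_* macros, can illustrate the conversion. The enum and function names below are hypothetical and exist only for this sketch; the example assumes an internal clock running at twice the external RTP clock (as with G.722, whose RTP timestamp clock is 8 kHz while the codec samples at 16 kHz).

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-alone version of the internal-to-external scaling. */
typedef enum { kNoScaling, kScaleTwo, kScaleTwoThirds, kScaleFourThirds } Scale;

static uint32_t ScaleInternalToExternal(Scale s, uint32_t lastInternalTS,
                                        uint32_t lastExternalTS,
                                        uint32_t internalTS) {
  int32_t diff = (int32_t)(internalTS - lastInternalTS);
  switch (s) {
    case kScaleTwo:        diff >>= 1;             break; /* divide by 2     */
    case kScaleTwoThirds:  diff = (diff * 3) >> 1; break; /* multiply by 3/2 */
    case kScaleFourThirds: diff = (diff * 3) >> 2; break; /* multiply by 3/4 */
    default:                                       break; /* no scaling      */
  }
  return lastExternalTS + (uint32_t)diff;
}

int main(void) {
  /* Internal timestamps advance twice as fast as external ones:
   * an internal advance of 320 maps to an external advance of 160. */
  uint32_t ext = ScaleInternalToExternal(kScaleTwo, 1000u, 500u, 1320u);
  printf("external TS = %lu\n", (unsigned long)ext);  /* 660 */
  return 0;
}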
Example #3
int WebRtcNetEQ_UpdateIatStatistics(AutomodeInst_t *inst, int maxBufLen,
                                    uint16_t seqNumber, uint32_t timeStamp,
                                    int32_t fsHz, int mdCodec, int streamingMode)
{
    uint32_t timeIat; /* inter-arrival time */
    int i;
    int32_t tempsum = 0; /* temp summation */
    int32_t tempvar; /* temporary variable */
    int retval = 0; /* return value */
    int16_t packetLenSamp; /* packet speech length in samples */

    /****************/
    /* Sanity check */
    /****************/

    if (maxBufLen <= 1 || fsHz <= 0)
    {
        /* maxBufLen must be at least 2 and fsHz must be strictly positive */
        return -1;
    }

    /****************************/
    /* Update packet statistics */
    /****************************/

    /* Try calculating packet length from current and previous timestamps */
    if (!WebRtcNetEQ_IsNewerTimestamp(timeStamp, inst->lastTimeStamp) ||
        !WebRtcNetEQ_IsNewerSequenceNumber(seqNumber, inst->lastSeqNo))
    {
        /* Wrong timestamp or sequence order; revert to backup plan */
        packetLenSamp = inst->packetSpeechLenSamp; /* use stored value */
    }
    else
    {
        /* calculate timestamps per packet */
        packetLenSamp = (int16_t) WebRtcSpl_DivU32U16(timeStamp - inst->lastTimeStamp,
            seqNumber - inst->lastSeqNo);
    }

    /* Check that the packet size is positive; if not, the statistics cannot be updated. */
    if (inst->firstPacketReceived && packetLenSamp > 0)
    { /* packet size ok */

        /* calculate inter-arrival time in integer packets (rounding down) */
        timeIat = WebRtcSpl_DivW32W16(inst->packetIatCountSamp, packetLenSamp);

        /* Special operations for streaming mode */
        if (streamingMode != 0)
        {
            /*
             * Calculate IAT in Q8, including fractions of a packet (i.e., more accurate
             * than timeIat).
             */
            int16_t timeIatQ8 = (int16_t) WebRtcSpl_DivW32W16(
                WEBRTC_SPL_LSHIFT_W32(inst->packetIatCountSamp, 8), packetLenSamp);

            /*
             * Calculate the cumulative IAT sum with sequence-number compensation (ideal
             * arrival times make this sum zero).
             */
            inst->cSumIatQ8 += (timeIatQ8
                - WEBRTC_SPL_LSHIFT_W32(seqNumber - inst->lastSeqNo, 8));

            /* subtract drift term */
            inst->cSumIatQ8 -= CSUM_IAT_DRIFT;

            /* ensure not negative */
            inst->cSumIatQ8 = WEBRTC_SPL_MAX(inst->cSumIatQ8, 0);

            /* remember max */
            if (inst->cSumIatQ8 > inst->maxCSumIatQ8)
            {
                inst->maxCSumIatQ8 = inst->cSumIatQ8;
                inst->maxCSumUpdateTimer = 0;
            }

            /* too long since the last maximum was observed; decrease max value */
            if (inst->maxCSumUpdateTimer > (uint32_t) WEBRTC_SPL_MUL_32_16(fsHz,
                MAX_STREAMING_PEAK_PERIOD))
            {
                inst->maxCSumIatQ8 -= 4; /* remove 1000*4/256 = 15.6 ms/s */
            }
        } /* end of streaming mode */

        /* check for discontinuous packet sequence and re-ordering */
        if (WebRtcNetEQ_IsNewerSequenceNumber(seqNumber, inst->lastSeqNo + 1))
        {
            /* Compensate for gap in the sequence numbers.
             * Reduce IAT with expected extra time due to lost packets, but ensure that
             * the IAT is not negative.
             */
            timeIat -= WEBRTC_SPL_MIN(timeIat,
                (uint16_t) (seqNumber - (uint16_t) (inst->lastSeqNo + 1)));
        }
        else if (!WebRtcNetEQ_IsNewerSequenceNumber(seqNumber, inst->lastSeqNo))
        {
            /* compensate for re-ordering */
            timeIat += (uint16_t) (inst->lastSeqNo + 1 - seqNumber);
        }

        /* saturate IAT at maximum value */
        timeIat = WEBRTC_SPL_MIN( timeIat, MAX_IAT );

        /* update iatProb = forgetting_factor * iatProb for all elements */
        for (i = 0; i <= MAX_IAT; i++)
        {
            int32_t tempHi, tempLo; /* Temporary variables */

            /*
             * Multiply iatProbFact (Q15) with iatProb (Q30) and right-shift 15 steps
             * to come back to Q30. The operation is done in two steps:
             */

            /*
             * 1) Multiply the high 16 bits (15 bits + sign) of iatProb. Shift iatProb
             * 16 steps right to get the high 16 bits in an int16_t prior to
             * multiplication, and left-shift with 1 afterwards to come back to
             * Q30 = (Q15 * (Q30>>16)) << 1.
             */
            tempHi = WEBRTC_SPL_MUL_16_16(inst->iatProbFact,
                (int16_t) WEBRTC_SPL_RSHIFT_W32(inst->iatProb[i], 16));
            tempHi = WEBRTC_SPL_LSHIFT_W32(tempHi, 1); /* left-shift 1 step */

            /*
             * 2) Isolate and multiply the low 16 bits of iatProb. Right-shift 15 steps
             * afterwards to come back to Q30 = (Q15 * Q30) >> 15.
             */
            tempLo = inst->iatProb[i] & 0x0000FFFF; /* isolate the 16 low bits */
            tempLo = WEBRTC_SPL_MUL_16_U16(inst->iatProbFact,
                (uint16_t) tempLo);
            tempLo = WEBRTC_SPL_RSHIFT_W32(tempLo, 15);

            /* Finally, add the high and low parts */
            inst->iatProb[i] = tempHi + tempLo;

            /* Sum all vector elements while we are at it... */
            tempsum += inst->iatProb[i];
        }

        /*
         * Increase the probability for the currently observed inter-arrival time
         * with 1 - iatProbFact. The factor is in Q15, iatProb in Q30;
         * hence, left-shift 15 steps to obtain result in Q30.
         */
        inst->iatProb[timeIat] += (32768 - inst->iatProbFact) << 15;

        tempsum += (32768 - inst->iatProbFact) << 15; /* add to vector sum */

        /*
         * Update iatProbFact (changes only during the first seconds after reset)
         * The factor converges to IAT_PROB_FACT.
         */
        inst->iatProbFact += (IAT_PROB_FACT - inst->iatProbFact + 3) >> 2;

        /* iatProb should sum up to 1 (in Q30). */
        tempsum -= 1 << 30; /* should be zero */

        /* Check if it does, correct if it doesn't. */
        if (tempsum > 0)
        {
            /* tempsum too large => decrease a few values in the beginning */
            i = 0;
            while (i <= MAX_IAT && tempsum > 0)
            {
                /* Remove iatProb[i] / 16 from iatProb, but not more than tempsum */
                tempvar = WEBRTC_SPL_MIN(tempsum, inst->iatProb[i] >> 4);
                inst->iatProb[i++] -= tempvar;
                tempsum -= tempvar;
            }
        }
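The histogram update in the loop above (the source page truncates this listing partway through the function) scales each Q30 probability by a Q15 forgetting factor without a 64-bit multiply, by splitting the Q30 value into its high and low 16-bit halves. The stand-alone sketch below (hypothetical helper name, example values) reproduces that split and checks it against a 64-bit reference.

#include <stdio.h>
#include <stdint.h>

/* Q15 * Q30 >> 15 via a 16/16-bit split, as in the iatProb loop above. */
static int32_t MulQ15Q30(int16_t factQ15, int32_t probQ30) {
  int32_t hi = (int32_t)factQ15 * (int16_t)(probQ30 >> 16); /* high 16 bits */
  hi <<= 1;                                  /* (Q15 * (Q30>>16)) << 1 = Q30 */
  int32_t lo = (int32_t)factQ15 * (int32_t)(probQ30 & 0xFFFF);
  lo >>= 15;                                 /* (Q15 * Q30) >> 15 = Q30 */
  return hi + lo;
}

int main(void) {
  int16_t factQ15 = 32440;                   /* forgetting factor near 1.0 */
  int32_t probQ30 = 1 << 28;                 /* 0.25 in Q30 */
  int64_t ref = ((int64_t)factQ15 * probQ30) >> 15;
  printf("split = %ld, 64-bit reference = %lld\n",
         (long)MulQ15Q30(factQ15, probQ30), (long long)ref);
  return 0;
}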
Example #4
// Compute speech/noise probability.
// The noise (non-speech) probability is returned in nonSpeechProbFinal.
// priorLocSnr is the prior SNR for each frequency (in Q11).
// postLocSnr is the post SNR for each frequency (in Q11).
void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
                               uint16_t* nonSpeechProbFinal,
                               uint32_t* priorLocSnr,
                               uint32_t* postLocSnr) {

  uint32_t zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3;
  int32_t invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32;
  int32_t frac32, logTmp;
  int32_t logLrtTimeAvgKsumFX;
  int16_t indPriorFX16;
  int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
  int i, normTmp, normTmp2, nShifts;

  // compute feature based on average LR factor
  // this is the average over all frequencies of the smooth log LRT
  logLrtTimeAvgKsumFX = 0;
  for (i = 0; i < inst->magnLen; i++) {
    besselTmpFX32 = (int32_t)postLocSnr[i]; // Q11
    normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
    num = WEBRTC_SPL_LSHIFT_U32(postLocSnr[i], normTmp); // Q(11+normTmp)
    if (normTmp > 10) {
      den = WEBRTC_SPL_LSHIFT_U32(priorLocSnr[i], normTmp - 11); // Q(normTmp)
    } else {
      den = WEBRTC_SPL_RSHIFT_U32(priorLocSnr[i], 11 - normTmp); // Q(normTmp)
    }
    if (den > 0) {
      besselTmpFX32 -= WEBRTC_SPL_UDIV(num, den); // Q11
    } else {
      besselTmpFX32 -= num; // Q11
    }

    // inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - log(snrLocPrior)
    //                                       - inst->logLrtTimeAvg[i]);
    // Here, LRT_TAVG = 0.5
    zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
    frac32 = (int32_t)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
    tmp32 = WEBRTC_SPL_MUL(frac32, frac32);
    tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(tmp32, -43), 19);
    tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)frac32, 5412, 12);
    frac32 = tmp32 + 37;
    // tmp32 = log2(priorLocSnr[i])
    tmp32 = (int32_t)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12
    logTmp = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(tmp32, 178), 8);
                                                  // log2(priorLocSnr[i])*log(2)
    tmp32no1 = WEBRTC_SPL_RSHIFT_W32(logTmp + inst->logLrtTimeAvgW32[i], 1);
                                                  // Q12
    inst->logLrtTimeAvgW32[i] += (besselTmpFX32 - tmp32no1); // Q12

    logLrtTimeAvgKsumFX += inst->logLrtTimeAvgW32[i]; // Q12
  }
  inst->featureLogLrt = WEBRTC_SPL_RSHIFT_W32(logLrtTimeAvgKsumFX * 5,
                                              inst->stages + 10);
                                                  // 5 = BIN_SIZE_LRT / 2
  // done with computation of LR factor

  //
  //compute the indicator functions
  //

  // average LRT feature
  // FLOAT code
  // indicator0 = 0.5 * (tanh(widthPrior *
  //                      (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
  tmpIndFX = 16384; // Q14(1.0)
  tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt; // Q12
  nShifts = 7 - inst->stages; // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
  //use larger width in tanh map for pause regions
  if (tmp32no1 < 0) {
    tmpIndFX = 0;
    tmp32no1 = -tmp32no1;
    //widthPrior = widthPrior * 2.0;
    nShifts++;
  }
  tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14
  // compute indicator function: sigmoid map
  tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 14);
  if ((tableIndex < 16) && (tableIndex >= 0)) {
    tmp16no2 = kIndicatorTable[tableIndex];
    tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
    frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14
    tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
    if (tmpIndFX == 0) {
      tmpIndFX = 8192 - tmp16no2; // Q14
    } else {
      tmpIndFX = 8192 + tmp16no2; // Q14
    }
  }
  indPriorFX = WEBRTC_SPL_MUL_16_16(inst->weightLogLrt, tmpIndFX); // 6*Q14

  //spectral flatness feature
  if (inst->weightSpecFlat) {
    tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400); // Q10
    tmpIndFX = 16384; // Q14(1.0)
    //use larger width in tanh map for pause regions
    tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1; //Q10
    nShifts = 4;
    if (inst->thresholdSpecFlat < tmpU32no1) {
      tmpIndFX = 0;
      tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
      //widthPrior = widthPrior * 2.0;
      nShifts++;
    }
    tmp32no1 = (int32_t)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2,
                                                                  nShifts), 25);
                                                     //Q14
    tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts),
                                    25); //Q14
    // compute indicator function: sigmoid map
    // FLOAT code
    // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
    //                          (threshPrior1 - tmpFloat1)) + 1.0);
    tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
    if (tableIndex < 16) {
      tmp16no2 = kIndicatorTable[tableIndex];
      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
      frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
      tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
      if (tmpIndFX) {
        tmpIndFX = 8192 + tmp16no2; // Q14
      } else {
        tmpIndFX = 8192 - tmp16no2; // Q14
      }
    }
    indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecFlat, tmpIndFX); // 6*Q14
  }

  //for template spectral-difference
  if (inst->weightSpecDiff) {
    tmpU32no1 = 0;
    if (inst->featureSpecDiff) {
      normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
                               WebRtcSpl_NormU32(inst->featureSpecDiff));
      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp);
                                                         // Q(normTmp-2*stages)
      tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy,
                                        20 - inst->stages - normTmp);
      if (tmpU32no2 > 0) {
        // Q(20 - inst->stages)
        tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2);
      } else {
        tmpU32no1 = (uint32_t)(0x7fffffff);
      }
    }
    tmpU32no3 = WEBRTC_SPL_UDIV(WEBRTC_SPL_LSHIFT_U32(inst->thresholdSpecDiff,
                                                      17),
                                25);
    tmpU32no2 = tmpU32no1 - tmpU32no3;
    nShifts = 1;
    tmpIndFX = 16384; // Q14(1.0)
    //use larger width in tanh map for pause regions
    if (tmpU32no2 & 0x80000000) {
      tmpIndFX = 0;
      tmpU32no2 = tmpU32no3 - tmpU32no1;
      //widthPrior = widthPrior * 2.0;
      nShifts--;
    }
    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, nShifts);
    // compute indicator function: sigmoid map
    /* FLOAT code
     indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
     */
    tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
    if (tableIndex < 16) {
      tmp16no2 = kIndicatorTable[tableIndex];
      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
      frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
      tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                    tmp16no1, frac, 14);
      if (tmpIndFX) {
        tmpIndFX = 8192 + tmp16no2;
      } else {
        tmpIndFX = 8192 - tmp16no2;
      }
    }
    indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecDiff, tmpIndFX); // 6*Q14
  }

  //combine the indicator function with the feature weights
  // FLOAT code
  // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 *
  //                 indicator1 + weightIndPrior2 * indicator2);
  indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6); // Q14
  // done with computing indicator function

  //compute the prior probability
  // FLOAT code
  // inst->priorNonSpeechProb += PRIOR_UPDATE *
  //                             (indPriorNonSpeech - inst->priorNonSpeechProb);
  tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
  inst->priorNonSpeechProb += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
                                PRIOR_UPDATE_Q14, tmp16, 14); // Q14

  //final speech probability: combine prior model with LR factor:

  memset(nonSpeechProbFinal, 0, sizeof(uint16_t) * inst->magnLen);

  if (inst->priorNonSpeechProb > 0) {
    for (i = 0; i < inst->magnLen; i++) {
      // FLOAT code
      // invLrt = exp(inst->logLrtTimeAvg[i]);
      // invLrt = inst->priorSpeechProb * invLrt;
      // nonSpeechProbFinal[i] = (1.0 - inst->priorSpeechProb) /
      //                         (1.0 - inst->priorSpeechProb + invLrt);
      // invLrt = (1.0 - inst->priorNonSpeechProb) * invLrt;
      // nonSpeechProbFinal[i] = inst->priorNonSpeechProb /
      //                         (inst->priorNonSpeechProb + invLrt);
      if (inst->logLrtTimeAvgW32[i] < 65300) {
        tmp32no1 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(
                                           inst->logLrtTimeAvgW32[i], 23637),
                                         14); // Q12
        intPart = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 12);
        if (intPart < -8) {
          intPart = -8;
        }
        frac = (int16_t)(tmp32no1 & 0x00000fff); // Q12

        // Quadratic approximation of 2^frac
        tmp32no2 = WEBRTC_SPL_RSHIFT_W32(frac * frac * 44, 19); // Q12
        tmp32no2 += WEBRTC_SPL_MUL_16_16_RSFT(frac, 84, 7); // Q12
        invLrtFX = WEBRTC_SPL_LSHIFT_W32(1, 8 + intPart)
                   + WEBRTC_SPL_SHIFT_W32(tmp32no2, intPart - 4); // Q8

        normTmp = WebRtcSpl_NormW32(invLrtFX);
        normTmp2 = WebRtcSpl_NormW16((16384 - inst->priorNonSpeechProb));
        if (normTmp + normTmp2 >= 7) {
          if (normTmp + normTmp2 < 15) {
            invLrtFX = WEBRTC_SPL_RSHIFT_W32(invLrtFX, 15 - normTmp2 - normTmp);
            // Q(normTmp+normTmp2-7)
            tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX,
                                            (16384 - inst->priorNonSpeechProb));
            // Q(normTmp+normTmp2+7)
            invLrtFX = WEBRTC_SPL_SHIFT_W32(tmp32no1, 7 - normTmp - normTmp2);
                                                                  // Q14
          } else {
            tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX,
                                            (16384 - inst->priorNonSpeechProb));
                                                                  // Q22
            invLrtFX = WEBRTC_SPL_RSHIFT_W32(tmp32no1, 8); // Q14
          }

          tmp32no1 = WEBRTC_SPL_LSHIFT_W32((int32_t)inst->priorNonSpeechProb,
                                           8); // Q22

          nonSpeechProbFinal[i] = (uint16_t)WEBRTC_SPL_DIV(tmp32no1,
              (int32_t)inst->priorNonSpeechProb + invLrtFX); // Q8
        }
      }
    }
  }
}
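The final loop converts the averaged log-LRT back to the linear domain with a fixed-point exponential: multiply by log2(e) ~= 23637/2^14, split into integer and fractional parts, and approximate 2^frac by the quadratic 1 + (84/128)*f + (1408/4096)*f^2. The stand-alone sketch below (hypothetical function name, small illustrative inputs) mirrors that arithmetic and compares it with exp() from <math.h>.

#include <math.h>
#include <stdio.h>
#include <stdint.h>

/* exp(x) for a Q12 input, returned in Q8, mirroring the listing above. */
static int32_t ExpQ12ToQ8(int32_t xQ12) {
  int32_t yQ12 = (int32_t)(((int64_t)xQ12 * 23637) >> 14); /* x * log2(e), Q12 */
  int16_t intPart = (int16_t)(yQ12 >> 12);
  if (intPart < -8) intPart = -8;              /* clamp as in the listing */
  int16_t frac = (int16_t)(yQ12 & 0x0FFF);     /* fractional part, Q12 */
  int32_t poly = (frac * frac * 44) >> 19;     /* ~0.34375 * f^2, Q12 */
  poly += (frac * 84) >> 7;                    /* + ~0.65625 * f, Q12 */
  /* 2^intPart * (1 + poly), rescaled from Q12 to Q8. */
  return (1 << (8 + intPart)) + (intPart >= 4 ? poly << (intPart - 4)
                                              : poly >> (4 - intPart));
}

int main(void) {
  int32_t xQ12;
  for (xQ12 = -4096; xQ12 <= 4096; xQ12 += 2048) {
    printf("x = %5.2f   exp(x) = %7.4f   fixed-point = %7.4f\n",
           xQ12 / 4096.0, exp(xQ12 / 4096.0), ExpQ12ToQ8(xQ12) / 256.0);
  }
  return 0;
}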