Example #1
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
                                   int16_t gain,
                                   size_t index,
                                   int16_t sign,
                                   int16_t* inputState,
                                   int16_t* outputBuf2,
                                   const int16_t* coefficient,
                                   int16_t* inputBuf,
                                   int16_t* outputBuf,
                                   int* index2) {
  int i = 0, j = 0;  /* Loop counters. */
  int16_t* ubufQQpos2 = &outputBuf2[PITCH_BUFFSIZE - (index + 2)];
  int16_t tmpW16 = 0;

  for (i = 0; i < loopNumber; i++) {
    int32_t tmpW32 = 0;

    /* Filter to get fractional pitch. */
    for (j = 0; j < PITCH_FRACORDER; j++) {
      tmpW32 += ubufQQpos2[*index2 + j] * coefficient[j];
    }

    /* Saturate to avoid overflow in tmpW16. */
    tmpW32 = WEBRTC_SPL_SAT(536862719, tmpW32, -536879104);
    tmpW32 += 8192;
    tmpW16 = (int16_t)(tmpW32 >> 14);

    /* Shift low pass filter state. */
    memmove(&inputState[1], &inputState[0],
            (PITCH_DAMPORDER - 1) * sizeof(int16_t));
    inputState[0] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                      gain, tmpW16, 12);

    /* Low pass filter. */
    tmpW32 = 0;
    /* TODO(kma): Define a static inline function WebRtcSpl_DotProduct()
       in spl_inl.h to replace this and other similar loops. */
    for (j = 0; j < PITCH_DAMPORDER; j++) {
      tmpW32 += inputState[j] * kDampFilter[j];
    }

    /* Saturate to avoid overflow in tmpW16. */
    tmpW32 = WEBRTC_SPL_SAT(1073725439, tmpW32, -1073758208);
    tmpW32 += 16384;
    tmpW16 = (int16_t)(tmpW32 >> 15);

    /* Subtract from input and update buffer. */
    tmpW32 = inputBuf[*index2] - sign * tmpW16;
    outputBuf[*index2] = WebRtcSpl_SatW32ToW16(tmpW32);
    tmpW32 = inputBuf[*index2] + outputBuf[*index2];
    outputBuf2[*index2 + PITCH_BUFFSIZE] = WebRtcSpl_SatW32ToW16(tmpW32);

    (*index2)++;
  }
}
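
The clamp-then-round step above is the easiest part to get wrong when porting. Here is a minimal, self-contained sketch of just that pattern (SAT is a local stand-in for WEBRTC_SPL_SAT; the constants are the ones in the function, chosen so the rounded result always fits in int16_t):

#include <stdint.h>
#include <stdio.h>

#define SAT(a, x, b) ((x) > (a) ? (a) : ((x) < (b) ? (b) : (x)))

/* Round a Q28 accumulator to Q14 the way the fractional-pitch loop does:
 * saturate first (536862719 + 8192 = 2^29 - 1 -> 32767, and
 * -536879104 + 8192 = -2^29 -> -32768), then add half an LSB and shift. */
static int16_t round_q28_to_q14(int32_t acc) {
  acc = SAT(536862719, acc, -536879104);
  return (int16_t)((acc + 8192) >> 14);
}

int main(void) {
  int32_t acc = (int32_t)8192 * 8192;     /* 0.5 * 0.5 in Q14 -> 0.25 in Q28 */
  printf("%d\n", round_q28_to_q14(acc));  /* prints 4096, i.e. 0.25 in Q14 */
  return 0;
}
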
Example #2
/* The conversion is implemented by the step-down algorithm */
void WebRtcSpl_AToK_JSK(
    WebRtc_Word16 *a16, /* Q11 */
    WebRtc_Word16 useOrder,
    WebRtc_Word16 *k16  /* Q15 */
                        )
{
  int m, k;
  WebRtc_Word32 tmp32[MAX_AR_MODEL_ORDER];
  WebRtc_Word32 tmp32b;
  WebRtc_Word32 tmp_inv_denum32;
  WebRtc_Word16 tmp_inv_denum16;

  k16[useOrder-1] = WEBRTC_SPL_LSHIFT_W16(a16[useOrder], 4); //Q11<<4 => Q15

  for (m=useOrder-1; m>0; m--) {
    tmp_inv_denum32 = ((WebRtc_Word32) 1073741823) - WEBRTC_SPL_MUL_16_16(k16[m], k16[m]); // (1 - k^2) in Q30
    tmp_inv_denum16 = (WebRtc_Word16) WEBRTC_SPL_RSHIFT_W32(tmp_inv_denum32, 15); // (1 - k^2) in Q15

    for (k=1; k<=m; k++) {
      tmp32b = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)a16[k], 16) -
          WEBRTC_SPL_LSHIFT_W32(WEBRTC_SPL_MUL_16_16(k16[m], a16[m-k+1]), 1);

      tmp32[k] = WebRtcSpl_DivW32W16(tmp32b, tmp_inv_denum16); //Q27/Q15 = Q12
    }

    for (k=1; k<m; k++) {
      a16[k] = (WebRtc_Word16) WEBRTC_SPL_RSHIFT_W32(tmp32[k], 1); //Q12>>1 => Q11
    }

    tmp32[m] = WEBRTC_SPL_SAT(4092, tmp32[m], -4092);
    k16[m-1] = (WebRtc_Word16) WEBRTC_SPL_LSHIFT_W32(tmp32[m], 3); //Q12<<3 => Q15
  }

  return;
}
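
The Q-format bookkeeping can obscure the underlying recursion, so here is a floating-point reference of the same step-down algorithm (a sketch for checking the fixed-point version against; MAX_ORDER is an assumed bound):

#include <stdio.h>

#define MAX_ORDER 16

/* Step-down: k[order-1] = a[order]; then for each m the order-m polynomial
 * is a_new[i] = (a[i] - k[m] * a[m-i+1]) / (1 - k[m]^2), and its last
 * coefficient becomes the next reflection coefficient. */
static void lpc_to_refl_ref(double *a, int order, double *k) {
  double tmp[MAX_ORDER + 1];
  int m, i;
  k[order - 1] = a[order];
  for (m = order - 1; m > 0; m--) {
    double denom = 1.0 - k[m] * k[m];
    for (i = 1; i <= m; i++)
      tmp[i] = (a[i] - k[m] * a[m - i + 1]) / denom;
    for (i = 1; i < m; i++)
      a[i] = tmp[i];
    k[m - 1] = tmp[m];
  }
}

int main(void) {
  /* a = {1, 0.625, 0.25} is the step-up of k = {0.5, 0.25},
   * so the step-down must recover those values. */
  double a[3] = {1.0, 0.625, 0.25};
  double k[2];
  lpc_to_refl_ref(a, 2, k);
  printf("k0 = %f, k1 = %f\n", k[0], k[1]);  /* 0.5, 0.25 */
  return 0;
}
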
Example #3
void WebRtcSpl_LpcToReflCoef(int16_t* a16, int use_order, int16_t* k16)
{
    int m, k;
    int32_t tmp32[SPL_LPC_TO_REFL_COEF_MAX_AR_MODEL_ORDER];
    int32_t tmp_inv_denom32;
    int16_t tmp_inv_denom16;

    k16[use_order - 1] = a16[use_order] << 3;  // Q12<<3 => Q15
    for (m = use_order - 1; m > 0; m--)
    {
        // (1 - k^2) in Q30
        tmp_inv_denom32 = 1073741823 - k16[m] * k16[m];
        // (1 - k^2) in Q15
        tmp_inv_denom16 = (int16_t)(tmp_inv_denom32 >> 15);

        for (k = 1; k <= m; k++)
        {
            // tmp[k] = (a[k] - RC[m] * a[m-k+1]) / (1.0 - RC[m]*RC[m]);

            // [Q12<<16 - (Q15*Q12)<<1] = [Q28 - Q28] = Q28
            tmp32[k] = (a16[k] << 16) - (k16[m] * a16[m - k + 1] << 1);

            tmp32[k] = WebRtcSpl_DivW32W16(tmp32[k], tmp_inv_denom16); //Q28/Q15 = Q13
        }

        for (k = 1; k < m; k++)
        {
            a16[k] = (int16_t)(tmp32[k] >> 1);  // Q13>>1 => Q12
        }

        tmp32[m] = WEBRTC_SPL_SAT(8191, tmp32[m], -8191);
        k16[m - 1] = (int16_t)WEBRTC_SPL_LSHIFT_W32(tmp32[m], 2); //Q13<<2 => Q15
    }
    return;
}
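
This is the same step-down recursion as WebRtcSpl_AToK_JSK above, but with Q12 input coefficients instead of Q11. A hedged usage sketch (the header name is an assumption and varies across WebRTC versions; the expected values follow from the Q-format comments in the code):

#include <stdint.h>
#include <stdio.h>

#include "signal_processing_library.h"  /* path is an assumption */

int main(void) {
  /* Order-2 LPC polynomial in Q12: a[1] = 0.625, a[2] = 0.25
   * (a[0] = 1.0 is implied; the routine never reads it). */
  int16_t a16[3] = {4096, 2560, 1024};
  int16_t k16[2];
  WebRtcSpl_LpcToReflCoef(a16, 2, k16);
  /* Expect roughly 0.5 and 0.25 in Q15, i.e. ~16384 and 8192. */
  printf("k0 = %d, k1 = %d\n", k16[0], k16[1]);
  return 0;
}
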
Example #4
void WebRtcSpl_FilterMAFastQ12(const int16_t* in_ptr,
                               int16_t* out_ptr,
                               const int16_t* B,
                               size_t B_length,
                               size_t length)
{
    size_t i, j;
    for (i = 0; i < length; i++)
    {
        int32_t o = 0;

        for (j = 0; j < B_length; j++)
        {
          o += B[j] * in_ptr[i - j];
        }

        // Saturate so that the rounded Q12 result fits in int16_t:
        // 2^27 = 134217728 corresponds to 32768 in Q12.
        o = WEBRTC_SPL_SAT((int32_t)134215679, o, (int32_t)-134217728);

        *out_ptr++ = (int16_t)((o + (int32_t)2048) >> 12);
    }
    return;
}
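
Note that the inner loop reads in_ptr[i - j], so the first outputs reach back before in_ptr: the caller must keep B_length - 1 samples of history in front of the current frame. A usage sketch under that assumption (header name again assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "signal_processing_library.h"  /* path is an assumption */

#define B_LEN 3
#define FRAME 8

int main(void) {
  /* 3-tap moving average; each tap is ~1/3 in Q12. */
  const int16_t b[B_LEN] = {1365, 1365, 1365};
  /* B_LEN - 1 history samples (silence here) precede the frame. */
  int16_t buf[B_LEN - 1 + FRAME] = {0};
  const int16_t frame[FRAME] = {1000, 1000, 1000, 1000, 0, 0, 0, 0};
  int16_t out[FRAME];
  size_t i;

  memcpy(buf + B_LEN - 1, frame, sizeof(frame));
  WebRtcSpl_FilterMAFastQ12(buf + B_LEN - 1, out, b, B_LEN, FRAME);
  for (i = 0; i < FRAME; i++)
    printf("%d ", out[i]);  /* ramps to ~1000, then decays */
  printf("\n");
  return 0;
}
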
Example #5
File: aec_core.c (project: ikonin/WebRTC)
static void NonLinearProcessing(aec_t *aec, int *ip, float *wfft, short *output, short *outputH)
{
    float efw[2][PART_LEN1], dfw[2][PART_LEN1];
    complex_t xfw[PART_LEN1];
    complex_t comfortNoiseHband[PART_LEN1];
    float fft[PART_LEN2];
    float scale, dtmp;
    float nlpGainHband;
    int i, j, pos;

    // Coherence and non-linear filter
    float cohde[PART_LEN1], cohxd[PART_LEN1];
    float hNlDeAvg, hNlXdAvg;
    float hNl[PART_LEN1];
    float hNlPref[PREF_BAND_SIZE];
    float hNlFb = 0, hNlFbLow = 0;
    const float prefBandQuant = 0.75f, prefBandQuantLow = 0.5f;
    const int prefBandSize = PREF_BAND_SIZE / aec->mult;
    const int minPrefBand = 4 / aec->mult;

    // Near and error power sums
    float sdSum = 0, seSum = 0;

    // Power estimate smoothing coefficients
    const float gCoh[2][2] = {{0.9f, 0.1f}, {0.93f, 0.07f}};
    const float *ptrGCoh = gCoh[aec->mult - 1];

    // Filter energy
    float wfEnMax = 0, wfEn = 0;
    const int delayEstInterval = 10 * aec->mult;

    aec->delayEstCtr++;
    if (aec->delayEstCtr == delayEstInterval) {
        aec->delayEstCtr = 0;
    }

    // initialize comfort noise for H band
    memset(comfortNoiseHband, 0, sizeof(comfortNoiseHband));
    nlpGainHband = (float)0.0;
    dtmp = (float)0.0;

    // Measure energy in each filter partition to determine delay.
    // TODO: Spread by computing one partition per block?
    if (aec->delayEstCtr == 0) {
        wfEnMax = 0;
        aec->delayIdx = 0;
        for (i = 0; i < NR_PART; i++) {
            pos = i * PART_LEN1;
            wfEn = 0;
            for (j = 0; j < PART_LEN1; j++) {
                wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
                    aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
            }

            if (wfEn > wfEnMax) {
                wfEnMax = wfEn;
                aec->delayIdx = i;
            }
        }
    }

    // NLP
    // Windowed far fft
    for (i = 0; i < PART_LEN; i++) {
        fft[i] = aec->xBuf[i] * sqrtHanning[i];
        fft[PART_LEN + i] = aec->xBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }
    aec_rdft_128(1, fft, ip, wfft);

    xfw[0][1] = 0;
    xfw[PART_LEN][1] = 0;
    xfw[0][0] = fft[0];
    xfw[PART_LEN][0] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        xfw[i][0] = fft[2 * i];
        xfw[i][1] = fft[2 * i + 1];
    }

    // Buffer far.
    memcpy(aec->xfwBuf, xfw, sizeof(xfw));

    // Use delayed far.
    memcpy(xfw, aec->xfwBuf + aec->delayIdx * PART_LEN1, sizeof(xfw));

    // Windowed near fft
    for (i = 0; i < PART_LEN; i++) {
        fft[i] = aec->dBuf[i] * sqrtHanning[i];
        fft[PART_LEN + i] = aec->dBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }
    aec_rdft_128(1, fft, ip, wfft);

    dfw[1][0] = 0;
    dfw[1][PART_LEN] = 0;
    dfw[0][0] = fft[0];
    dfw[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        dfw[0][i] = fft[2 * i];
        dfw[1][i] = fft[2 * i + 1];
    }

    // Windowed error fft
    for (i = 0; i < PART_LEN; i++) {
        fft[i] = aec->eBuf[i] * sqrtHanning[i];
        fft[PART_LEN + i] = aec->eBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }
    aec_rdft_128(1, fft, ip, wfft);
    efw[1][0] = 0;
    efw[1][PART_LEN] = 0;
    efw[0][0] = fft[0];
    efw[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        efw[0][i] = fft[2 * i];
        efw[1][i] = fft[2 * i + 1];
    }

    // Smoothed PSD
    for (i = 0; i < PART_LEN1; i++) {
        aec->sd[i] = ptrGCoh[0] * aec->sd[i] + ptrGCoh[1] *
            (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
        aec->se[i] = ptrGCoh[0] * aec->se[i] + ptrGCoh[1] *
            (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
        // We threshold here to protect against the ill-effects of a zero farend.
        // The threshold is not arbitrarily chosen, but balances protection and
        // adverse interaction with the algorithm's tuning.
        // TODO: investigate further why this is so sensitive.
        aec->sx[i] = ptrGCoh[0] * aec->sx[i] + ptrGCoh[1] *
            WEBRTC_SPL_MAX(xfw[i][0] * xfw[i][0] + xfw[i][1] * xfw[i][1], 15);

        aec->sde[i][0] = ptrGCoh[0] * aec->sde[i][0] + ptrGCoh[1] *
            (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
        aec->sde[i][1] = ptrGCoh[0] * aec->sde[i][1] + ptrGCoh[1] *
            (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);

        aec->sxd[i][0] = ptrGCoh[0] * aec->sxd[i][0] + ptrGCoh[1] *
            (dfw[0][i] * xfw[i][0] + dfw[1][i] * xfw[i][1]);
        aec->sxd[i][1] = ptrGCoh[0] * aec->sxd[i][1] + ptrGCoh[1] *
            (dfw[0][i] * xfw[i][1] - dfw[1][i] * xfw[i][0]);

        sdSum += aec->sd[i];
        seSum += aec->se[i];
    }

    // Divergent filter safeguard.
    if (aec->divergeState == 0) {
        if (seSum > sdSum) {
            aec->divergeState = 1;
        }
    }
    else {
        if (seSum * 1.05f < sdSum) {
            aec->divergeState = 0;
        }
    }

    if (aec->divergeState == 1) {
        memcpy(efw, dfw, sizeof(efw));
    }

    // Reset if error is significantly larger than nearend (13 dB).
    if (seSum > (19.95f * sdSum)) {
        memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
    }

    // Subband coherence
    for (i = 0; i < PART_LEN1; i++) {
        cohde[i] = (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
            (aec->sd[i] * aec->se[i] + 1e-10f);
        cohxd[i] = (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
            (aec->sx[i] * aec->sd[i] + 1e-10f);
    }

    hNlXdAvg = 0;
    for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
        hNlXdAvg += cohxd[i];
    }
    hNlXdAvg /= prefBandSize;
    hNlXdAvg = 1 - hNlXdAvg;

    hNlDeAvg = 0;
    for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
        hNlDeAvg += cohde[i];
    }
    hNlDeAvg /= prefBandSize;

    if (hNlXdAvg < 0.75f && hNlXdAvg < aec->hNlXdAvgMin) {
        aec->hNlXdAvgMin = hNlXdAvg;
    }

    if (hNlDeAvg > 0.98f && hNlXdAvg > 0.9f) {
        aec->stNearState = 1;
    }
    else if (hNlDeAvg < 0.95f || hNlXdAvg < 0.8f) {
        aec->stNearState = 0;
    }

    if (aec->hNlXdAvgMin == 1) {
        aec->echoState = 0;
        aec->overDrive = aec->minOverDrive;

        if (aec->stNearState == 1) {
            memcpy(hNl, cohde, sizeof(hNl));
            hNlFb = hNlDeAvg;
            hNlFbLow = hNlDeAvg;
        }
        else {
            for (i = 0; i < PART_LEN1; i++) {
                hNl[i] = 1 - cohxd[i];
            }
            hNlFb = hNlXdAvg;
            hNlFbLow = hNlXdAvg;
        }
    }
    else {

        if (aec->stNearState == 1) {
            aec->echoState = 0;
            memcpy(hNl, cohde, sizeof(hNl));
            hNlFb = hNlDeAvg;
            hNlFbLow = hNlDeAvg;
        }
        else {
            aec->echoState = 1;
            for (i = 0; i < PART_LEN1; i++) {
                hNl[i] = WEBRTC_SPL_MIN(cohde[i], 1 - cohxd[i]);
            }

            // Select an order statistic from the preferred bands.
            // TODO: Using quicksort now, but a selection algorithm may be preferred.
            memcpy(hNlPref, &hNl[minPrefBand], sizeof(float) * prefBandSize);
            qsort(hNlPref, prefBandSize, sizeof(float), CmpFloat);
            hNlFb = hNlPref[(int)floor(prefBandQuant * (prefBandSize - 1))];
            hNlFbLow = hNlPref[(int)floor(prefBandQuantLow * (prefBandSize - 1))];
        }
    }

    // Track the local filter minimum to determine suppression overdrive.
    if (hNlFbLow < 0.6f && hNlFbLow < aec->hNlFbLocalMin) {
        aec->hNlFbLocalMin = hNlFbLow;
        aec->hNlFbMin = hNlFbLow;
        aec->hNlNewMin = 1;
        aec->hNlMinCtr = 0;
    }
    aec->hNlFbLocalMin = WEBRTC_SPL_MIN(aec->hNlFbLocalMin + 0.0008f / aec->mult, 1);
    aec->hNlXdAvgMin = WEBRTC_SPL_MIN(aec->hNlXdAvgMin + 0.0006f / aec->mult, 1);

    if (aec->hNlNewMin == 1) {
        aec->hNlMinCtr++;
    }
    if (aec->hNlMinCtr == 2) {
        aec->hNlNewMin = 0;
        aec->hNlMinCtr = 0;
        aec->overDrive = WEBRTC_SPL_MAX(aec->targetSupp /
            ((float)log(aec->hNlFbMin + 1e-10f) + 1e-10f), aec->minOverDrive);
    }

    // Smooth the overdrive.
    if (aec->overDrive < aec->overDriveSm) {
      aec->overDriveSm = 0.99f * aec->overDriveSm + 0.01f * aec->overDrive;
    }
    else {
      aec->overDriveSm = 0.9f * aec->overDriveSm + 0.1f * aec->overDrive;
    }

    WebRtcAec_OverdriveAndSuppress(aec, hNl, hNlFb, efw);

#ifdef G167
    if (aec->cnToggle) {
      ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl);
    }
#else
    // Add comfort noise.
    ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl);
#endif

    // Inverse error fft.
    fft[0] = efw[0][0];
    fft[1] = efw[0][PART_LEN];
    for (i = 1; i < PART_LEN; i++) {
        fft[2*i] = efw[0][i];
        // Sign change required by Ooura fft.
        fft[2*i + 1] = -efw[1][i];
    }
    aec_rdft_128(-1, fft, ip, wfft);

    // Overlap and add to obtain output.
    scale = 2.0f / PART_LEN2;
    for (i = 0; i < PART_LEN; i++) {
        fft[i] *= scale; // fft scaling
        fft[i] = fft[i]*sqrtHanning[i] + aec->outBuf[i];

        // Saturation protection
        output[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, fft[i],
            WEBRTC_SPL_WORD16_MIN);

        fft[PART_LEN + i] *= scale; // fft scaling
        aec->outBuf[i] = fft[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }

    // For H band
    if (aec->sampFreq == 32000) {

        // H band gain
        // average nlp over low band: average over second half of freq spectrum
        // (4->8 kHz)
        GetHighbandGain(hNl, &nlpGainHband);

        // Inverse comfort_noise
        if (flagHbandCn == 1) {
            fft[0] = comfortNoiseHband[0][0];
            fft[1] = comfortNoiseHband[PART_LEN][0];
            for (i = 1; i < PART_LEN; i++) {
                fft[2*i] = comfortNoiseHband[i][0];
                fft[2*i + 1] = comfortNoiseHband[i][1];
            }
            aec_rdft_128(-1, fft, ip, wfft);
            scale = 2.0f / PART_LEN2;
        }

        // compute gain factor
        for (i = 0; i < PART_LEN; i++) {
            dtmp = (float)aec->dBufH[i];
            dtmp = (float)dtmp * nlpGainHband; // for variable gain

            // add some comfort noise where Hband is attenuated
            if (flagHbandCn == 1) {
                fft[i] *= scale; // fft scaling
                dtmp += cnScaleHband * fft[i];
            }

            // Saturation protection
            outputH[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, dtmp,
                WEBRTC_SPL_WORD16_MIN);
         }
    }

    // Copy the current block to the old position.
    memcpy(aec->xBuf, aec->xBuf + PART_LEN, sizeof(float) * PART_LEN);
    memcpy(aec->dBuf, aec->dBuf + PART_LEN, sizeof(float) * PART_LEN);
    memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN);

    // Copy the current block to the old position for H band
    if (aec->sampFreq == 32000) {
        memcpy(aec->dBufH, aec->dBufH + PART_LEN, sizeof(float) * PART_LEN);
    }

    memmove(aec->xfwBuf + PART_LEN1, aec->xfwBuf, sizeof(aec->xfwBuf) -
        sizeof(complex_t) * PART_LEN1);
}
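
The suppression decisions above hinge on per-bin magnitude-squared coherence: cohde between nearend and error, cohxd between farend and nearend. A self-contained sketch of that single computation:

#include <stdio.h>

/* Magnitude-squared coherence for one bin, as in the cohde/cohxd loop:
 * |S_xy|^2 / (S_xx * S_yy), with the same 1e-10 bias guarding against a
 * zero denominator. Inputs are the already-smoothed spectral estimates. */
static float coherence(float s_xx, float s_yy, float s_xy_re, float s_xy_im) {
  return (s_xy_re * s_xy_re + s_xy_im * s_xy_im) / (s_xx * s_yy + 1e-10f);
}

int main(void) {
  /* Fully coherent pair (y = 2x): coherence -> 1. */
  printf("%f\n", coherence(1.0f, 4.0f, 2.0f, 0.0f));
  /* Zero cross-spectrum: coherence -> 0. */
  printf("%f\n", coherence(1.0f, 4.0f, 0.0f, 0.0f));
  return 0;
}
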
Example #6
File: aec_core.c (project: ikonin/WebRTC)
static void ProcessBlock(aec_t *aec, const short *farend,
                         const short *nearend, const short *nearendH,
                         short *output, short *outputH)
{
    int i;
    float d[PART_LEN], y[PART_LEN], e[PART_LEN], dH[PART_LEN];
    short eInt16[PART_LEN];
    float scale;

    float fft[PART_LEN2];
    float xf[2][PART_LEN1], yf[2][PART_LEN1], ef[2][PART_LEN1];
    complex_t df[PART_LEN1];
    int ip[IP_LEN];
    float wfft[W_LEN];

    const float gPow[2] = {0.9f, 0.1f};

    // Noise estimate constants.
    const int noiseInitBlocks = 500 * aec->mult;
    const float step = 0.1f;
    const float ramp = 1.0002f;
    const float gInitNoise[2] = {0.999f, 0.001f};

#ifdef AEC_DEBUG
    fwrite(farend, sizeof(short), PART_LEN, aec->farFile);
    fwrite(nearend, sizeof(short), PART_LEN, aec->nearFile);
#endif

    memset(dH, 0, sizeof(dH));

    // ---------- Ooura fft ----------
    // Concatenate old and new farend blocks.
    for (i = 0; i < PART_LEN; i++) {
        aec->xBuf[i + PART_LEN] = (float)farend[i];
        d[i] = (float)nearend[i];
    }

    if (aec->sampFreq == 32000) {
        for (i = 0; i < PART_LEN; i++) {
            dH[i] = (float)nearendH[i];
        }
    }


    memcpy(fft, aec->xBuf, sizeof(float) * PART_LEN2);
    memcpy(aec->dBuf + PART_LEN, d, sizeof(float) * PART_LEN);
    // For H band
    if (aec->sampFreq == 32000) {
        memcpy(aec->dBufH + PART_LEN, dH, sizeof(float) * PART_LEN);
    }

    // Setting this on the first call initializes work arrays.
    ip[0] = 0;
    aec_rdft_128(1, fft, ip, wfft);

    // Far fft
    xf[1][0] = 0;
    xf[1][PART_LEN] = 0;
    xf[0][0] = fft[0];
    xf[0][PART_LEN] = fft[1];

    for (i = 1; i < PART_LEN; i++) {
        xf[0][i] = fft[2 * i];
        xf[1][i] = fft[2 * i + 1];
    }

    // Near fft
    memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2);
    aec_rdft_128(1, fft, ip, wfft);
    df[0][1] = 0;
    df[PART_LEN][1] = 0;
    df[0][0] = fft[0];
    df[PART_LEN][0] = fft[1];

    for (i = 1; i < PART_LEN; i++) {
        df[i][0] = fft[2 * i];
        df[i][1] = fft[2 * i + 1];
    }

    // Power smoothing
    for (i = 0; i < PART_LEN1; i++) {
        aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * NR_PART *
            (xf[0][i] * xf[0][i] + xf[1][i] * xf[1][i]);
        aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] *
            (df[i][0] * df[i][0] + df[i][1] * df[i][1]);
    }

    // Estimate noise power. Wait until dPow is more stable.
    if (aec->noiseEstCtr > 50) {
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dPow[i] < aec->dMinPow[i]) {
                aec->dMinPow[i] = (aec->dPow[i] + step * (aec->dMinPow[i] -
                    aec->dPow[i])) * ramp;
            }
            else {
                aec->dMinPow[i] *= ramp;
            }
        }
    }

    // Smooth increasing noise power from zero at the start,
    // to avoid a sudden burst of comfort noise.
    if (aec->noiseEstCtr < noiseInitBlocks) {
        aec->noiseEstCtr++;
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dMinPow[i] > aec->dInitMinPow[i]) {
                aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] +
                    gInitNoise[1] * aec->dMinPow[i];
            }
            else {
                aec->dInitMinPow[i] = aec->dMinPow[i];
            }
        }
        aec->noisePow = aec->dInitMinPow;
    }
    else {
        aec->noisePow = aec->dMinPow;
    }


    // Update the xfBuf block position.
    aec->xfBufBlockPos--;
    if (aec->xfBufBlockPos == -1) {
        aec->xfBufBlockPos = NR_PART - 1;
    }

    // Buffer xf
    memcpy(aec->xfBuf[0] + aec->xfBufBlockPos * PART_LEN1, xf[0],
           sizeof(float) * PART_LEN1);
    memcpy(aec->xfBuf[1] + aec->xfBufBlockPos * PART_LEN1, xf[1],
           sizeof(float) * PART_LEN1);

    memset(yf[0], 0, sizeof(float) * (PART_LEN1 * 2));

    // Filter far
    WebRtcAec_FilterFar(aec, yf);

    // Inverse fft to obtain echo estimate and error.
    fft[0] = yf[0][0];
    fft[1] = yf[0][PART_LEN];
    for (i = 1; i < PART_LEN; i++) {
        fft[2 * i] = yf[0][i];
        fft[2 * i + 1] = yf[1][i];
    }
    aec_rdft_128(-1, fft, ip, wfft);

    scale = 2.0f / PART_LEN2;
    for (i = 0; i < PART_LEN; i++) {
        y[i] = fft[PART_LEN + i] * scale; // fft scaling
    }

    for (i = 0; i < PART_LEN; i++) {
        e[i] = d[i] - y[i];
    }

    // Error fft
    memcpy(aec->eBuf + PART_LEN, e, sizeof(float) * PART_LEN);
    memset(fft, 0, sizeof(float) * PART_LEN);
    memcpy(fft + PART_LEN, e, sizeof(float) * PART_LEN);
    aec_rdft_128(1, fft, ip, wfft);

    ef[1][0] = 0;
    ef[1][PART_LEN] = 0;
    ef[0][0] = fft[0];
    ef[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        ef[0][i] = fft[2 * i];
        ef[1][i] = fft[2 * i + 1];
    }

    // Scale error signal inversely with far power.
    WebRtcAec_ScaleErrorSignal(aec, ef);
#ifdef G167
    if (aec->adaptToggle) {
#endif
        // Filter adaptation
        WebRtcAec_FilterAdaptation(aec, fft, ef, ip, wfft);
#ifdef G167
    }
#endif

    NonLinearProcessing(aec, ip, wfft, output, outputH);

#if defined(AEC_DEBUG) || defined(G167)
    for (i = 0; i < PART_LEN; i++) {
        eInt16[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
            WEBRTC_SPL_WORD16_MIN);
    }
#endif
#ifdef G167
    if (aec->nlpToggle == 0) {
        memcpy(output, eInt16, sizeof(eInt16));
    }
#endif

    if (aec->metricsMode == 1) {
        for (i = 0; i < PART_LEN; i++) {
            eInt16[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
                WEBRTC_SPL_WORD16_MIN);
        }

        // Update power levels and echo metrics
        UpdateLevel(&aec->farlevel, farend);
        UpdateLevel(&aec->nearlevel, nearend);
        UpdateLevel(&aec->linoutlevel, eInt16);
        UpdateLevel(&aec->nlpoutlevel, output);
        UpdateMetrics(aec);
    }

#ifdef AEC_DEBUG
    fwrite(eInt16, sizeof(short), PART_LEN, aec->outLpFile);
    fwrite(output, sizeof(short), PART_LEN, aec->outFile);
#endif
}
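
Both xPow and dPow above use the same first-order recursive (leaky) average with gPow = {0.9, 0.1}. A small sketch showing its convergence behaviour:

#include <stdio.h>

/* P <- 0.9 * P + 0.1 * instant. For a constant unit input starting from
 * zero, P = 1 - 0.9^n after n blocks, so ~90% convergence takes about
 * 22 blocks. */
static float smooth_power(float prev, float instant) {
  const float gPow[2] = {0.9f, 0.1f};
  return gPow[0] * prev + gPow[1] * instant;
}

int main(void) {
  float p = 0.0f;
  int n;
  for (n = 0; n < 22; n++)
    p = smooth_power(p, 1.0f);
  printf("%f\n", p);  /* ~0.90 */
  return 0;
}
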
Example #7
int WebRtcNs_ProcessCore(NSinst_t* inst,
                         float* speechFrame,
                         float* speechFrameHB,
                         float* outFrame,
                         float* outFrameHB) {
  // main routine for noise reduction

  int     flagHB = 0;
  int     i;
  const int kStartBand = 5; // Skip first frequency bins during estimation.
  int     updateParsFlag;

  float   energy1, energy2, gain, factor, factor1, factor2;
  float   signalEnergy, sumMagn;
  float   snrPrior, currentEstimateStsa;
  float   tmpFloat1, tmpFloat2, tmpFloat3, probSpeech, probNonSpeech;
  float   gammaNoiseTmp, gammaNoiseOld;
  float   noiseUpdateTmp, fTmp;
  float   fout[BLOCKL_MAX];
  float   winData[ANAL_BLOCKL_MAX];
  float   magn[HALF_ANAL_BLOCKL], noise[HALF_ANAL_BLOCKL];
  float   theFilter[HALF_ANAL_BLOCKL], theFilterTmp[HALF_ANAL_BLOCKL];
  float   snrLocPost[HALF_ANAL_BLOCKL], snrLocPrior[HALF_ANAL_BLOCKL];
  float   probSpeechFinal[HALF_ANAL_BLOCKL] = { 0 };
  float   previousEstimateStsa[HALF_ANAL_BLOCKL];
  float   real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL];
  // Variables during startup
  float   sum_log_i = 0.0;
  float   sum_log_i_square = 0.0;
  float   sum_log_magn = 0.0;
  float   sum_log_i_log_magn = 0.0;
  float   parametric_noise = 0.0;
  float   parametric_exp = 0.0;
  float   parametric_num = 0.0;

  // SWB variables
  int     deltaBweHB = 1;
  int     deltaGainHB = 1;
  float   decayBweHB = 1.0;
  float   gainMapParHB = 1.0;
  float   gainTimeDomainHB = 1.0;
  float   avgProbSpeechHB, avgProbSpeechHBTmp, avgFilterGainHB, gainModHB;

  // Check that initiation has been done
  if (inst->initFlag != 1) {
    return (-1);
  }
  // Check for valid pointers based on sampling rate
  if (inst->fs == 32000) {
    if (speechFrameHB == NULL) {
      return -1;
    }
    flagHB = 1;
    // range for averaging low band quantities for H band gain
    deltaBweHB = (int)inst->magnLen / 4;
    deltaGainHB = deltaBweHB;
  }
  updateParsFlag = inst->modelUpdatePars[0];

  // update analysis buffer for L band
  memcpy(inst->dataBuf, inst->dataBuf + inst->blockLen10ms,
         sizeof(float) * (inst->anaLen - inst->blockLen10ms));
  memcpy(inst->dataBuf + inst->anaLen - inst->blockLen10ms, speechFrame,
         sizeof(float) * inst->blockLen10ms);

  if (flagHB == 1) {
    // update analysis buffer for H band
    memcpy(inst->dataBufHB, inst->dataBufHB + inst->blockLen10ms,
           sizeof(float) * (inst->anaLen - inst->blockLen10ms));
    memcpy(inst->dataBufHB + inst->anaLen - inst->blockLen10ms, speechFrameHB,
           sizeof(float) * inst->blockLen10ms);
  }

  // check if processing needed
  if (inst->outLen == 0) {
    // windowing
    energy1 = 0.0;
    for (i = 0; i < inst->anaLen; i++) {
      winData[i] = inst->window[i] * inst->dataBuf[i];
      energy1 += winData[i] * winData[i];
    }
    if (energy1 == 0.0) {
      // synthesize the special case of zero input
      // we want to avoid updating statistics in this case:
      // Updating feature statistics when we have zeros only will cause thresholds to
      // move towards zero signal situations. This in turn has the effect that once the
      // signal is "turned on" (non-zero values) everything will be treated as speech
      // and there is no noise suppression effect. Depending on the duration of the
      // inactive signal it takes a considerable amount of time for the system to learn
      // what is noise and what is speech.

      // read out fully processed segment
      for (i = inst->windShift; i < inst->blockLen + inst->windShift; i++) {
        fout[i - inst->windShift] = inst->syntBuf[i];
      }
      // update synthesis buffer
      memcpy(inst->syntBuf, inst->syntBuf + inst->blockLen,
             sizeof(float) * (inst->anaLen - inst->blockLen));
      memset(inst->syntBuf + inst->anaLen - inst->blockLen, 0,
             sizeof(float) * inst->blockLen);

      // out buffer
      inst->outLen = inst->blockLen - inst->blockLen10ms;
      if (inst->blockLen > inst->blockLen10ms) {
        for (i = 0; i < inst->outLen; i++) {
          inst->outBuf[i] = fout[i + inst->blockLen10ms];
        }
      }
      for (i = 0; i < inst->blockLen10ms; ++i)
        outFrame[i] = WEBRTC_SPL_SAT(
            WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);

      // for time-domain gain of HB
      if (flagHB == 1)
        for (i = 0; i < inst->blockLen10ms; ++i)
          outFrameHB[i] = WEBRTC_SPL_SAT(
              WEBRTC_SPL_WORD16_MAX, inst->dataBufHB[i], WEBRTC_SPL_WORD16_MIN);

      return 0;
    }

    inst->blockInd++; // Update the block index only when we process a block.
    // FFT
    WebRtc_rdft(inst->anaLen, 1, winData, inst->ip, inst->wfft);

    imag[0] = 0;
    real[0] = winData[0];
    magn[0] = (float)(fabs(real[0]) + 1.0f);
    imag[inst->magnLen - 1] = 0;
    real[inst->magnLen - 1] = winData[1];
    magn[inst->magnLen - 1] = (float)(fabs(real[inst->magnLen - 1]) + 1.0f);
    signalEnergy = (float)(real[0] * real[0]) + 
                   (float)(real[inst->magnLen - 1] * real[inst->magnLen - 1]);
    sumMagn = magn[0] + magn[inst->magnLen - 1];
    if (inst->blockInd < END_STARTUP_SHORT) {
      inst->initMagnEst[0] += magn[0];
      inst->initMagnEst[inst->magnLen - 1] += magn[inst->magnLen - 1];
      tmpFloat2 = log((float)(inst->magnLen - 1));
      sum_log_i = tmpFloat2;
      sum_log_i_square = tmpFloat2 * tmpFloat2;
      tmpFloat1 = log(magn[inst->magnLen - 1]);
      sum_log_magn = tmpFloat1;
      sum_log_i_log_magn = tmpFloat2 * tmpFloat1;
    }
    for (i = 1; i < inst->magnLen - 1; i++) {
      real[i] = winData[2 * i];
      imag[i] = winData[2 * i + 1];
      // magnitude spectrum
      fTmp = real[i] * real[i];
      fTmp += imag[i] * imag[i];
      signalEnergy += fTmp;
      magn[i] = ((float)sqrt(fTmp)) + 1.0f;
      sumMagn += magn[i];
      if (inst->blockInd < END_STARTUP_SHORT) {
        inst->initMagnEst[i] += magn[i];
        if (i >= kStartBand) {
          tmpFloat2 = log((float)i);
          sum_log_i += tmpFloat2;
          sum_log_i_square += tmpFloat2 * tmpFloat2;
          tmpFloat1 = log(magn[i]);
          sum_log_magn += tmpFloat1;
          sum_log_i_log_magn += tmpFloat2 * tmpFloat1;
        }
      }
    }
    signalEnergy = signalEnergy / ((float)inst->magnLen);
    inst->signalEnergy = signalEnergy;
    inst->sumMagn = sumMagn;

    //compute spectral flatness on input spectrum
    WebRtcNs_ComputeSpectralFlatness(inst, magn);
    // quantile noise estimate
    WebRtcNs_NoiseEstimation(inst, magn, noise);
    //compute simplified noise model during startup
    if (inst->blockInd < END_STARTUP_SHORT) {
      // Estimate White noise
      inst->whiteNoiseLevel += sumMagn / ((float)inst->magnLen) * inst->overdrive;
      // Estimate Pink noise parameters
      tmpFloat1 = sum_log_i_square * ((float)(inst->magnLen - kStartBand));
      tmpFloat1 -= (sum_log_i * sum_log_i);
      tmpFloat2 = (sum_log_i_square * sum_log_magn - sum_log_i * sum_log_i_log_magn);
      tmpFloat3 = tmpFloat2 / tmpFloat1;
      // Constrain the estimated spectrum to be positive
      if (tmpFloat3 < 0.0f) {
        tmpFloat3 = 0.0f;
      }
      inst->pinkNoiseNumerator += tmpFloat3;
      tmpFloat2 = (sum_log_i * sum_log_magn);
      tmpFloat2 -= ((float)(inst->magnLen - kStartBand)) * sum_log_i_log_magn;
      tmpFloat3 = tmpFloat2 / tmpFloat1;
      // Constrain the pink noise power to be in the interval [0, 1].
      if (tmpFloat3 < 0.0f) {
        tmpFloat3 = 0.0f;
      }
      if (tmpFloat3 > 1.0f) {
        tmpFloat3 = 1.0f;
      }
      inst->pinkNoiseExp += tmpFloat3;

      // Calculate frequency independent parts of parametric noise estimate.
      if (inst->pinkNoiseExp == 0.0f) {
        // Use white noise estimate
        parametric_noise = inst->whiteNoiseLevel;
      } else {
        // Use pink noise estimate
        parametric_num = exp(inst->pinkNoiseNumerator / (float)(inst->blockInd + 1));
        parametric_num *= (float)(inst->blockInd + 1);
        parametric_exp = inst->pinkNoiseExp / (float)(inst->blockInd + 1);
        parametric_noise = parametric_num / pow((float)kStartBand, parametric_exp);
      }
      for (i = 0; i < inst->magnLen; i++) {
        // Estimate the background noise using the white and pink noise parameters
        if ((inst->pinkNoiseExp > 0.0f) && (i >= kStartBand)) {
          // Use pink noise estimate
          parametric_noise = parametric_num / pow((float)i, parametric_exp);
        }
        theFilterTmp[i] = (inst->initMagnEst[i] - inst->overdrive * parametric_noise);
        theFilterTmp[i] /= (inst->initMagnEst[i] + (float)0.0001);
        // Weight quantile noise with modeled noise
        noise[i] *= (inst->blockInd);
        tmpFloat2 = parametric_noise * (END_STARTUP_SHORT - inst->blockInd);
        noise[i] += (tmpFloat2 / (float)(inst->blockInd + 1));
        noise[i] /= END_STARTUP_SHORT;
      }
    }
    //compute average signal during END_STARTUP_LONG time:
    // used to normalize spectral difference measure
    if (inst->blockInd < END_STARTUP_LONG) {
      inst->featureData[5] *= inst->blockInd;
      inst->featureData[5] += signalEnergy;
      inst->featureData[5] /= (inst->blockInd + 1);
    }

#ifdef PROCESS_FLOW_0
    if (inst->blockInd > END_STARTUP_LONG) {
      //option: average the quantile noise: for check with AEC2
      for (i = 0; i < inst->magnLen; i++) {
        noise[i] = (float)0.6 * inst->noisePrev[i] + (float)0.4 * noise[i];
      }
      for (i = 0; i < inst->magnLen; i++) {
        // Wiener filter with over-subtraction:
        theFilter[i] = (magn[i] - inst->overdrive * noise[i]) / (magn[i] + (float)0.0001);
      }
    }
#else
    //start processing at frames == converged+1
    //
    // STEP 1: compute  prior and post snr based on quantile noise est
    //

    // compute DD estimate of prior SNR: needed for new method
    for (i = 0; i < inst->magnLen; i++) {
      // post snr
      snrLocPost[i] = (float)0.0;
      if (magn[i] > noise[i]) {
        snrLocPost[i] = magn[i] / (noise[i] + (float)0.0001) - (float)1.0;
      }
      // previous post snr
      // previous estimate: based on previous frame with gain filter
      previousEstimateStsa[i] = inst->magnPrev[i] / (inst->noisePrev[i] + (float)0.0001)
                                * (inst->smooth[i]);
      // DD estimate is sum of two terms: current estimate and previous estimate
      // directed decision update of snrPrior
      snrLocPrior[i] = DD_PR_SNR * previousEstimateStsa[i] + ((float)1.0 - DD_PR_SNR)
                       * snrLocPost[i];
      // post and prior snr needed for step 2
    }  // end of loop over freqs
#ifdef PROCESS_FLOW_1
    for (i = 0; i < inst->magnLen; i++) {
      // gain filter
      tmpFloat1 = inst->overdrive + snrLocPrior[i];
      tmpFloat2 = (float)snrLocPrior[i] / tmpFloat1;
      theFilter[i] = (float)tmpFloat2;
    }  // end of loop over freqs
#endif
    // done with step 1: dd computation of prior and post snr

    //
    //STEP 2: compute speech/noise likelihood
    //
#ifdef PROCESS_FLOW_2
    // compute difference of input spectrum with learned/estimated noise spectrum
    WebRtcNs_ComputeSpectralDifference(inst, magn);
    // compute histograms for parameter decisions (thresholds and weights for features)
    // parameters are extracted once every window time (=inst->modelUpdatePars[1])
    if (updateParsFlag >= 1) {
      // counter update
      inst->modelUpdatePars[3]--;
      // update histogram
      if (inst->modelUpdatePars[3] > 0) {
        WebRtcNs_FeatureParameterExtraction(inst, 0);
      }
      // compute model parameters
      if (inst->modelUpdatePars[3] == 0) {
        WebRtcNs_FeatureParameterExtraction(inst, 1);
        inst->modelUpdatePars[3] = inst->modelUpdatePars[1];
        // if wish to update only once, set flag to zero
        if (updateParsFlag == 1) {
          inst->modelUpdatePars[0] = 0;
        } else {
          // update every window:
          // get normalization for spectral difference for next window estimate
          inst->featureData[6] = inst->featureData[6]
                                 / ((float)inst->modelUpdatePars[1]);
          inst->featureData[5] = (float)0.5 * (inst->featureData[6]
                                               + inst->featureData[5]);
          inst->featureData[6] = (float)0.0;
        }
      }
    }
    // compute speech/noise probability
    WebRtcNs_SpeechNoiseProb(inst, probSpeechFinal, snrLocPrior, snrLocPost);
    // time-avg parameter for noise update
    gammaNoiseTmp = NOISE_UPDATE;
    for (i = 0; i < inst->magnLen; i++) {
      probSpeech = probSpeechFinal[i];
      probNonSpeech = (float)1.0 - probSpeech;
      // temporary noise update:
      // use it for speech frames if update value is less than previous
      noiseUpdateTmp = gammaNoiseTmp * inst->noisePrev[i] + ((float)1.0 - gammaNoiseTmp)
                       * (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
      //
      // time-constant based on speech/noise state
      gammaNoiseOld = gammaNoiseTmp;
      gammaNoiseTmp = NOISE_UPDATE;
      // increase gamma (i.e., less noise update) for frame likely to be speech
      if (probSpeech > PROB_RANGE) {
        gammaNoiseTmp = SPEECH_UPDATE;
      }
      // conservative noise update
      if (probSpeech < PROB_RANGE) {
        inst->magnAvgPause[i] += GAMMA_PAUSE * (magn[i] - inst->magnAvgPause[i]);
      }
      // noise update
      if (gammaNoiseTmp == gammaNoiseOld) {
        noise[i] = noiseUpdateTmp;
      } else {
        noise[i] = gammaNoiseTmp * inst->noisePrev[i] + ((float)1.0 - gammaNoiseTmp)
                   * (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
        // allow for noise update downwards:
        //  if noise update decreases the noise, it is safe, so allow it to happen
        if (noiseUpdateTmp < noise[i]) {
          noise[i] = noiseUpdateTmp;
        }
      }
    }  // end of freq loop
    // done with step 2: noise update

    //
    // STEP 3: compute dd update of prior snr and post snr based on new noise estimate
    //
    for (i = 0; i < inst->magnLen; i++) {
      // post and prior snr
      currentEstimateStsa = (float)0.0;
      if (magn[i] > noise[i]) {
        currentEstimateStsa = magn[i] / (noise[i] + (float)0.0001) - (float)1.0;
      }
      // DD estimate is sum of two terms: current estimate and previous estimate
      // directed decision update of snrPrior
      snrPrior = DD_PR_SNR * previousEstimateStsa[i] + ((float)1.0 - DD_PR_SNR)
                 * currentEstimateStsa;
      // gain filter
      tmpFloat1 = inst->overdrive + snrPrior;
      tmpFloat2 = (float)snrPrior / tmpFloat1;
      theFilter[i] = (float)tmpFloat2;
    }  // end of loop over freqs
    // done with step3
#endif
#endif

    for (i = 0; i < inst->magnLen; i++) {
      // flooring bottom
      if (theFilter[i] < inst->denoiseBound) {
        theFilter[i] = inst->denoiseBound;
      }
      // flooring top
      if (theFilter[i] > (float)1.0) {
        theFilter[i] = 1.0;
      }
      if (inst->blockInd < END_STARTUP_SHORT) {
        // flooring bottom
        if (theFilterTmp[i] < inst->denoiseBound) {
          theFilterTmp[i] = inst->denoiseBound;
        }
        // flooring top
        if (theFilterTmp[i] > (float)1.0) {
          theFilterTmp[i] = 1.0;
        }
        // Weight the two suppression filters
        theFilter[i] *= (inst->blockInd);
        theFilterTmp[i] *= (END_STARTUP_SHORT - inst->blockInd);
        theFilter[i] += theFilterTmp[i];
        theFilter[i] /= (END_STARTUP_SHORT);
      }
      // smoothing
#ifdef PROCESS_FLOW_0
      inst->smooth[i] *= SMOOTH; // value set to 0.7 in define.h file
      inst->smooth[i] += ((float)1.0 - SMOOTH) * theFilter[i];
#else
      inst->smooth[i] = theFilter[i];
#endif
      real[i] *= inst->smooth[i];
      imag[i] *= inst->smooth[i];
    }
    // keep track of noise and magn spectrum for next frame
    for (i = 0; i < inst->magnLen; i++) {
      inst->noisePrev[i] = noise[i];
      inst->magnPrev[i] = magn[i];
    }
    // back to time domain
    winData[0] = real[0];
    winData[1] = real[inst->magnLen - 1];
    for (i = 1; i < inst->magnLen - 1; i++) {
      winData[2 * i] = real[i];
      winData[2 * i + 1] = imag[i];
    }
    WebRtc_rdft(inst->anaLen, -1, winData, inst->ip, inst->wfft);

    for (i = 0; i < inst->anaLen; i++) {
      real[i] = 2.0f * winData[i] / inst->anaLen; // fft scaling
    }

    //scale factor: only do it after END_STARTUP_LONG time
    factor = (float)1.0;
    if (inst->gainmap == 1 && inst->blockInd > END_STARTUP_LONG) {
      factor1 = (float)1.0;
      factor2 = (float)1.0;

      energy2 = 0.0;
      for (i = 0; i < inst->anaLen; i++) {
        energy2 += (float)real[i] * (float)real[i];
      }
      gain = (float)sqrt(energy2 / (energy1 + (float)1.0));

#ifdef PROCESS_FLOW_2
      // scaling for new version
      if (gain > B_LIM) {
        factor1 = (float)1.0 + (float)1.3 * (gain - B_LIM);
        if (gain * factor1 > (float)1.0) {
          factor1 = (float)1.0 / gain;
        }
      }
      if (gain < B_LIM) {
        //don't reduce scale too much for pause regions:
        // attenuation here should be controlled by flooring
        if (gain <= inst->denoiseBound) {
          gain = inst->denoiseBound;
        }
        factor2 = (float)1.0 - (float)0.3 * (B_LIM - gain);
      }
      //combine both scales with speech/noise prob:
      // note prior (priorSpeechProb) is not frequency dependent
      factor = inst->priorSpeechProb * factor1 + ((float)1.0 - inst->priorSpeechProb)
               * factor2;
#else
      if (gain > B_LIM) {
        factor = (float)1.0 + (float)1.3 * (gain - B_LIM);
      } else {
        factor = (float)1.0 + (float)2.0 * (gain - B_LIM);
      }
      if (gain * factor > (float)1.0) {
        factor = (float)1.0 / gain;
      }
#endif
    }  // out of inst->gainmap==1

    // synthesis
    for (i = 0; i < inst->anaLen; i++) {
      inst->syntBuf[i] += factor * inst->window[i] * (float)real[i];
    }
    // read out fully processed segment
    for (i = inst->windShift; i < inst->blockLen + inst->windShift; i++) {
      fout[i - inst->windShift] = inst->syntBuf[i];
    }
    // update synthesis buffer
    memcpy(inst->syntBuf, inst->syntBuf + inst->blockLen,
           sizeof(float) * (inst->anaLen - inst->blockLen));
    memset(inst->syntBuf + inst->anaLen - inst->blockLen, 0,
           sizeof(float) * inst->blockLen);

    // out buffer
    inst->outLen = inst->blockLen - inst->blockLen10ms;
    if (inst->blockLen > inst->blockLen10ms) {
      for (i = 0; i < inst->outLen; i++) {
        inst->outBuf[i] = fout[i + inst->blockLen10ms];
      }
    }
  }  // end of if out.len==0
  else {
    for (i = 0; i < inst->blockLen10ms; i++) {
      fout[i] = inst->outBuf[i];
    }
    memcpy(inst->outBuf, inst->outBuf + inst->blockLen10ms,
           sizeof(float) * (inst->outLen - inst->blockLen10ms));
    memset(inst->outBuf + inst->outLen - inst->blockLen10ms, 0,
           sizeof(float) * inst->blockLen10ms);
    inst->outLen -= inst->blockLen10ms;
  }

  for (i = 0; i < inst->blockLen10ms; ++i)
    outFrame[i] = WEBRTC_SPL_SAT(
        WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);

  // for time-domain gain of HB
  if (flagHB == 1) {
    for (i = 0; i < inst->magnLen; i++) {
      inst->speechProbHB[i] = probSpeechFinal[i];
    }
    // average speech prob from low band
    // avg over second half (i.e., 4->8kHz) of freq. spectrum
    avgProbSpeechHB = 0.0;
    for (i = inst->magnLen - deltaBweHB - 1; i < inst->magnLen - 1; i++) {
      avgProbSpeechHB += inst->speechProbHB[i];
    }
    avgProbSpeechHB = avgProbSpeechHB / ((float)deltaBweHB);
    // average filter gain from low band
    // average over second half (i.e., 4->8kHz) of freq. spectrum
    avgFilterGainHB = 0.0;
    for (i = inst->magnLen - deltaGainHB - 1; i < inst->magnLen - 1; i++) {
      avgFilterGainHB += inst->smooth[i];
    }
    avgFilterGainHB = avgFilterGainHB / ((float)(deltaGainHB));
    avgProbSpeechHBTmp = (float)2.0 * avgProbSpeechHB - (float)1.0;
    // gain based on speech prob:
    gainModHB = (float)0.5 * ((float)1.0 + (float)tanh(gainMapParHB * avgProbSpeechHBTmp));
    //combine gain with low band gain
    gainTimeDomainHB = (float)0.5 * gainModHB + (float)0.5 * avgFilterGainHB;
    if (avgProbSpeechHB >= (float)0.5) {
      gainTimeDomainHB = (float)0.25 * gainModHB + (float)0.75 * avgFilterGainHB;
    }
    gainTimeDomainHB = gainTimeDomainHB * decayBweHB;
    //make sure gain is within flooring range
    // flooring bottom
    if (gainTimeDomainHB < inst->denoiseBound) {
      gainTimeDomainHB = inst->denoiseBound;
    }
    // flooring top
    if (gainTimeDomainHB > (float)1.0) {
      gainTimeDomainHB = 1.0;
    }
    //apply gain
    for (i = 0; i < inst->blockLen10ms; i++) {
      float o = gainTimeDomainHB * inst->dataBufHB[i];
      outFrameHB[i] = WEBRTC_SPL_SAT(
          WEBRTC_SPL_WORD16_MAX, o, WEBRTC_SPL_WORD16_MIN);
    }
  }  // end of H band gain computation

  return 0;
}
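
Steps 1 and 3 above both reduce to the same decision-directed (DD) update followed by a Wiener-style gain. A sketch of that core, with the DD weight assumed to be 0.98 (the real DD_PR_SNR constant lives in the suppressor's defines):

#include <stdio.h>

#define DD_WEIGHT 0.98f  /* assumed stand-in for DD_PR_SNR */

/* post SNR  = max(magn / noise - 1, 0)
 * prior SNR = w * previousEstimate + (1 - w) * postSNR
 * gain      = prior / (overdrive + prior) */
static float wiener_gain(float magn, float noise, float prev_est,
                         float overdrive) {
  float post = 0.0f;
  float prior;
  if (magn > noise)
    post = magn / (noise + 1e-4f) - 1.0f;
  prior = DD_WEIGHT * prev_est + (1.0f - DD_WEIGHT) * post;
  return prior / (overdrive + prior);
}

int main(void) {
  /* Strong speech bin (magnitude 10x the noise floor): gain ~0.89. */
  printf("%f\n", wiener_gain(10.0f, 1.0f, 8.0f, 1.0f));
  /* Noise-only bin: gain 0, later floored by denoiseBound. */
  printf("%f\n", wiener_gain(1.0f, 1.0f, 0.0f, 1.0f));
  return 0;
}
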
Example #8
void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
                                    PitchFiltstr* pfp,
                                    int16_t* lagsQ7,
                                    int16_t* gainsQ12) {
  int  k, n, m, ind, pos, pos3QQ;

  int16_t ubufQQ[PITCH_INTBUFFSIZE];
  int16_t oldLagQ7, lagdeltaQ7, curLagQ7;
  const int16_t* fracoeffQQ = NULL;
  int16_t scale;
  int16_t cnt = 0, frcQQ, indW16 = 0, tmpW16;
  int32_t tmpW32, tmp2W32, csum1QQ, esumxQQ;

  // Set up buffer and states.
  memcpy(ubufQQ, pfp->ubufQQ, sizeof(pfp->ubufQQ));
  oldLagQ7 = pfp->oldlagQ7;

  // No interpolation if pitch lag step is big.
  if ((WEBRTC_SPL_MUL_16_16_RSFT(lagsQ7[0], 3, 1) < oldLagQ7) ||
      (lagsQ7[0] > WEBRTC_SPL_MUL_16_16_RSFT(oldLagQ7, 3, 1))) {
    oldLagQ7 = lagsQ7[0];
  }

  ind = 0;
  pos = ind + PITCH_BUFFSIZE;
  scale = 0;
  for (k = 0; k < PITCH_SUBFRAMES; k++) {

    // Calculate interpolation steps.
    lagdeltaQ7 = lagsQ7[k] - oldLagQ7;
    lagdeltaQ7 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                   lagdeltaQ7, kDivFactor, 15);
    curLagQ7 = oldLagQ7;
    oldLagQ7 = lagsQ7[k];

    csum1QQ = 1;
    esumxQQ = 1;

    // Same as function WebRtcIsacfix_PitchFilter(), we break the pitch
    // filtering into two for-loops (5 x 12) below.
    for (cnt = 0; cnt < kSegments; cnt++) {
      // Update parameters for each segment.
      curLagQ7 += lagdeltaQ7;
      indW16 = (int16_t)CalcLrIntQ(curLagQ7, 7);
      tmpW16 = WEBRTC_SPL_LSHIFT_W16(indW16, 7);
      tmpW16 -= curLagQ7;
      frcQQ = WEBRTC_SPL_RSHIFT_W16(tmpW16, 4);
      frcQQ += 4;

      if (frcQQ == PITCH_FRACS) {
        frcQQ = 0;
      }
      fracoeffQQ = kIntrpCoef[frcQQ];

      pos3QQ = pos - (indW16 + 4);

      for (n = 0; n < PITCH_SUBFRAME_LEN / kSegments; n++) {
        // Filter to get fractional pitch.

        tmpW32 = 0;
        for (m = 0; m < PITCH_FRACORDER; m++) {
          tmpW32 += WEBRTC_SPL_MUL_16_16(ubufQQ[pos3QQ + m], fracoeffQQ[m]);
        }

        // Subtract from input and update buffer.
        ubufQQ[pos] = indatQ0[ind];

        tmp2W32 = WEBRTC_SPL_MUL_16_32_RSFT14(indatQ0[ind], tmpW32);
        tmpW32 += 8192;
        tmpW16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmpW32, 14);
        tmpW32 = WEBRTC_SPL_MUL_16_16(tmpW16, tmpW16);

        if ((tmp2W32 > 1073700000) || (csum1QQ > 1073700000) ||
            (tmpW32 > 1073700000) || (esumxQQ > 1073700000)) {  // 2^30
          scale++;
          csum1QQ = WEBRTC_SPL_RSHIFT_W32(csum1QQ, 1);
          esumxQQ = WEBRTC_SPL_RSHIFT_W32(esumxQQ, 1);
        }
        tmp2W32 = WEBRTC_SPL_RSHIFT_W32(tmp2W32, scale);
        csum1QQ += tmp2W32;
        tmpW32 = WEBRTC_SPL_RSHIFT_W32(tmpW32, scale);
        esumxQQ += tmpW32;

        ind++;
        pos++;
        pos3QQ++;
      }
    }

    if (csum1QQ < esumxQQ) {
      tmp2W32 = WebRtcSpl_DivResultInQ31(csum1QQ, esumxQQ);

      // Gain should be half the correlation.
      tmpW32 = WEBRTC_SPL_RSHIFT_W32(tmp2W32, 20);
    } else {
      tmpW32 = 4096;
    }
    gainsQ12[k] = (int16_t)WEBRTC_SPL_SAT(PITCH_MAX_GAIN_Q12, tmpW32, 0);
  }

  // Export buffer and states.
  memcpy(pfp->ubufQQ, ubufQQ + PITCH_FRAME_LEN, sizeof(pfp->ubufQQ));
  pfp->oldlagQ7 = lagsQ7[PITCH_SUBFRAMES - 1];
  pfp->oldgainQ12 = gainsQ12[PITCH_SUBFRAMES - 1];

}
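
The csum1QQ/esumxQQ bookkeeping above keeps two correlated sums inside 32 bits by halving both and bumping a shared scale whenever any term approaches 2^30; since the gain only depends on their ratio, the shared scale cancels out. A sketch of just that mechanism:

#include <stdint.h>
#include <stdio.h>

/* Accumulate two sums under one running scale: when anything nears 2^30,
 * halve both sums together and shift all later terms, preserving the
 * ratio csum/esum. */
static void accumulate(int32_t *csum, int32_t *esum, int16_t *scale,
                       int32_t cterm, int32_t eterm) {
  if (cterm > 1073700000 || *csum > 1073700000 ||
      eterm > 1073700000 || *esum > 1073700000) {  /* ~2^30 */
    (*scale)++;
    *csum >>= 1;
    *esum >>= 1;
  }
  *csum += cterm >> *scale;
  *esum += eterm >> *scale;
}

int main(void) {
  int32_t csum = 1, esum = 1;
  int16_t scale = 0;
  int n;
  for (n = 0; n < 100; n++)
    accumulate(&csum, &esum, &scale, 500000000, 1000000000);
  /* The ratio stays ~0.5 no matter how often the sums were rescaled. */
  printf("scale = %d, ratio = %f\n", scale, (double)csum / esum);
  return 0;
}
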
Example #9
static void ProcessBlock(aec_t* aec) {
    int i;
    float d[PART_LEN], y[PART_LEN], e[PART_LEN], dH[PART_LEN];
    float scale;

    float fft[PART_LEN2];
    float xf[2][PART_LEN1], yf[2][PART_LEN1], ef[2][PART_LEN1];
    float df[2][PART_LEN1];
    float far_spectrum = 0.0f;
    float near_spectrum = 0.0f;
    float abs_far_spectrum[PART_LEN1];
    float abs_near_spectrum[PART_LEN1];

    const float gPow[2] = {0.9f, 0.1f};

    // Noise estimate constants.
    const int noiseInitBlocks = 500 * aec->mult;
    const float step = 0.1f;
    const float ramp = 1.0002f;
    const float gInitNoise[2] = {0.999f, 0.001f};

    int16_t nearend[PART_LEN];
    int16_t* nearend_ptr = NULL;
    int16_t output[PART_LEN];
    int16_t outputH[PART_LEN];

    float* xf_ptr = NULL;

    memset(dH, 0, sizeof(dH));
    if (aec->sampFreq == 32000) {
      // Get the upper band first so we can reuse |nearend|.
      WebRtc_ReadBuffer(aec->nearFrBufH,
                        (void**) &nearend_ptr,
                        nearend,
                        PART_LEN);
      for (i = 0; i < PART_LEN; i++) {
          dH[i] = (float) (nearend_ptr[i]);
      }
      memcpy(aec->dBufH + PART_LEN, dH, sizeof(float) * PART_LEN);
    }
    WebRtc_ReadBuffer(aec->nearFrBuf, (void**) &nearend_ptr, nearend, PART_LEN);

    // ---------- Ooura fft ----------
    // Concatenate old and new nearend blocks.
    for (i = 0; i < PART_LEN; i++) {
        d[i] = (float) (nearend_ptr[i]);
    }
    memcpy(aec->dBuf + PART_LEN, d, sizeof(float) * PART_LEN);

#ifdef WEBRTC_AEC_DEBUG_DUMP
    {
        int16_t farend[PART_LEN];
        int16_t* farend_ptr = NULL;
        WebRtc_ReadBuffer(aec->far_time_buf, (void**) &farend_ptr, farend, 1);
        (void)fwrite(farend_ptr, sizeof(int16_t), PART_LEN, aec->farFile);
        (void)fwrite(nearend_ptr, sizeof(int16_t), PART_LEN, aec->nearFile);
    }
#endif

    // We should always have at least one element stored in |far_buf|.
    assert(WebRtc_available_read(aec->far_buf) > 0);
    WebRtc_ReadBuffer(aec->far_buf, (void**) &xf_ptr, &xf[0][0], 1);

    // Near fft
    memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2);
    TimeToFrequency(fft, df, 0);

    // Power smoothing
    for (i = 0; i < PART_LEN1; i++) {
      far_spectrum = (xf_ptr[i] * xf_ptr[i]) +
          (xf_ptr[PART_LEN1 + i] * xf_ptr[PART_LEN1 + i]);
      aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * NR_PART * far_spectrum;
      // Calculate absolute spectra
      abs_far_spectrum[i] = sqrtf(far_spectrum);

      near_spectrum = df[0][i] * df[0][i] + df[1][i] * df[1][i];
      aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum;
      // Calculate absolute spectra
      abs_near_spectrum[i] = sqrtf(near_spectrum);
    }

    // Estimate noise power. Wait until dPow is more stable.
    if (aec->noiseEstCtr > 50) {
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dPow[i] < aec->dMinPow[i]) {
                aec->dMinPow[i] = (aec->dPow[i] + step * (aec->dMinPow[i] -
                    aec->dPow[i])) * ramp;
            }
            else {
                aec->dMinPow[i] *= ramp;
            }
        }
    }

    // Smooth increasing noise power from zero at the start,
    // to avoid a sudden burst of comfort noise.
    if (aec->noiseEstCtr < noiseInitBlocks) {
        aec->noiseEstCtr++;
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dMinPow[i] > aec->dInitMinPow[i]) {
                aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] +
                    gInitNoise[1] * aec->dMinPow[i];
            }
            else {
                aec->dInitMinPow[i] = aec->dMinPow[i];
            }
        }
        aec->noisePow = aec->dInitMinPow;
    }
    else {
        aec->noisePow = aec->dMinPow;
    }

    // Block wise delay estimation used for logging
    if (aec->delay_logging_enabled) {
      int delay_estimate = 0;
      // Estimate the delay
      delay_estimate = WebRtc_DelayEstimatorProcessFloat(aec->delay_estimator,
                                                         abs_far_spectrum,
                                                         abs_near_spectrum,
                                                         PART_LEN1);
      if (delay_estimate >= 0) {
        // Update delay estimate buffer.
        aec->delay_histogram[delay_estimate]++;
      }
    }

    // Update the xfBuf block position.
    aec->xfBufBlockPos--;
    if (aec->xfBufBlockPos == -1) {
        aec->xfBufBlockPos = NR_PART - 1;
    }

    // Buffer xf
    memcpy(aec->xfBuf[0] + aec->xfBufBlockPos * PART_LEN1, xf_ptr,
           sizeof(float) * PART_LEN1);
    memcpy(aec->xfBuf[1] + aec->xfBufBlockPos * PART_LEN1, &xf_ptr[PART_LEN1],
           sizeof(float) * PART_LEN1);

    memset(yf, 0, sizeof(yf));

    // Filter far
    WebRtcAec_FilterFar(aec, yf);

    // Inverse fft to obtain echo estimate and error.
    fft[0] = yf[0][0];
    fft[1] = yf[0][PART_LEN];
    for (i = 1; i < PART_LEN; i++) {
        fft[2 * i] = yf[0][i];
        fft[2 * i + 1] = yf[1][i];
    }
    aec_rdft_inverse_128(fft);

    scale = 2.0f / PART_LEN2;
    for (i = 0; i < PART_LEN; i++) {
        y[i] = fft[PART_LEN + i] * scale; // fft scaling
    }

    for (i = 0; i < PART_LEN; i++) {
        e[i] = d[i] - y[i];
    }

    // Error fft
    memcpy(aec->eBuf + PART_LEN, e, sizeof(float) * PART_LEN);
    memset(fft, 0, sizeof(float) * PART_LEN);
    memcpy(fft + PART_LEN, e, sizeof(float) * PART_LEN);
    // TODO(bjornv): Change to use TimeToFrequency().
    aec_rdft_forward_128(fft);

    ef[1][0] = 0;
    ef[1][PART_LEN] = 0;
    ef[0][0] = fft[0];
    ef[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        ef[0][i] = fft[2 * i];
        ef[1][i] = fft[2 * i + 1];
    }

    if (aec->metricsMode == 1) {
      // Note that the first PART_LEN samples in fft (before transformation) are
      // zero. Hence, the scaling by two in UpdateLevel() should not be
      // performed. That scaling is taken care of in UpdateMetrics() instead.
      UpdateLevel(&aec->linoutlevel, ef);
    }

    // Scale error signal inversely with far power.
    WebRtcAec_ScaleErrorSignal(aec, ef);
    WebRtcAec_FilterAdaptation(aec, fft, ef);
    NonLinearProcessing(aec, output, outputH);

    if (aec->metricsMode == 1) {
        // Update power levels and echo metrics
        UpdateLevel(&aec->farlevel, (float (*)[PART_LEN1]) xf_ptr);
        UpdateLevel(&aec->nearlevel, df);
        UpdateMetrics(aec);
    }

    // Store the output block.
    WebRtc_WriteBuffer(aec->outFrBuf, output, PART_LEN);
    // For H band
    if (aec->sampFreq == 32000) {
        WebRtc_WriteBuffer(aec->outFrBufH, outputH, PART_LEN);
    }

#ifdef WEBRTC_AEC_DEBUG_DUMP
    {
        int16_t eInt16[PART_LEN];
        for (i = 0; i < PART_LEN; i++) {
            eInt16[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
                WEBRTC_SPL_WORD16_MIN);
        }

        (void)fwrite(eInt16, sizeof(int16_t), PART_LEN, aec->outLinearFile);
        (void)fwrite(output, sizeof(int16_t), PART_LEN, aec->outFile);
    }
#endif
}
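
Every transform in these blocks is followed by the same unpacking of Ooura's in-place rdft layout (it is also what TimeToFrequency produces): fft[0] is the DC bin, fft[1] the purely real Nyquist bin, and the remaining bins are interleaved (re, im) pairs. A standalone sketch:

#include <stdio.h>

#define PART_LEN  64
#define PART_LEN1 (PART_LEN + 1)
#define PART_LEN2 (2 * PART_LEN)

static void unpack_rdft(const float fft[PART_LEN2],
                        float re[PART_LEN1], float im[PART_LEN1]) {
  int i;
  re[0] = fft[0];         /* DC bin */
  im[0] = 0.0f;
  re[PART_LEN] = fft[1];  /* Nyquist bin, purely real */
  im[PART_LEN] = 0.0f;
  for (i = 1; i < PART_LEN; i++) {
    re[i] = fft[2 * i];
    im[i] = fft[2 * i + 1];
  }
}

int main(void) {
  float fft[PART_LEN2] = {3.0f, 7.0f, 1.0f, -2.0f};  /* rest zero */
  float re[PART_LEN1], im[PART_LEN1];
  unpack_rdft(fft, re, im);
  printf("DC=%g Nyquist=%g bin1=(%g,%g)\n", re[0], re[PART_LEN], re[1], im[1]);
  return 0;
}
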
Example #10
void ProcessBlock_background(aec_t *aec, const short *farend,
                             const short *nearend, short *output,
                             short vadState, short *farFrameAtEchoDelay)
{
    int i;
    float d[PART_LEN], y[PART_LEN], e[PART_LEN], dH[PART_LEN];
    float scale;

    float fft[PART_LEN2];
    float xf[2][PART_LEN1], yf[2][PART_LEN1], ef[2][PART_LEN1];
    complex_t df[PART_LEN1];
    float far_spectrum = 0.0f;
    float near_spectrum = 0.0f;
    float abs_far_spectrum[PART_LEN1];
    float abs_near_spectrum[PART_LEN1];

    const float gPow[2] = {0.9f, 0.1f};

    // Noise estimate constants.
    const int noiseInitBlocks = 500 * aec->mult;
    const float step = 0.1f;
    const float ramp = 1.0002f;
    const float gInitNoise[2] = {0.999f, 0.001f};


	
    memset(dH, 0, sizeof(dH));

    // ---------- Ooura fft ----------
    // Concatenate old and new farend blocks.
    for (i = 0; i < PART_LEN; i++) {
        aec->xBuf_background[i + PART_LEN] = (float)farend[i];
        d[i] = (float)nearend[i];
    }

    memcpy(fft, aec->xBuf_background, sizeof(float) * PART_LEN2);
    memcpy(aec->dBuf_background + PART_LEN, d, sizeof(float) * PART_LEN);

    aec_rdft_forward_128(fft);

    // Far fft
    xf[1][0] = 0;
    xf[1][PART_LEN] = 0;
    xf[0][0] = fft[0];
    xf[0][PART_LEN] = 0;  // fft[1]; note: the Nyquist bin is zeroed here, unlike in ProcessBlock.

    for (i = 1; i < PART_LEN; i++) {
        xf[0][i] = fft[2 * i];
        xf[1][i] = fft[2 * i + 1];
    }


    // Near fft
    memcpy(fft, aec->dBuf_background, sizeof(float) * PART_LEN2);
    aec_rdft_forward_128(fft);
    df[0][1] = 0;
    df[PART_LEN][1] = 0;
    df[0][0] = fft[0];
    df[PART_LEN][0] = fft[1];

    for (i = 1; i < PART_LEN; i++) {
        df[i][0] = fft[2 * i];
        df[i][1] = fft[2 * i + 1];
    }


    // Power smoothing
    for (i = 0; i < PART_LEN1; i++) {
      far_spectrum = xf[0][i] * xf[0][i] + xf[1][i] * xf[1][i];
      aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * NR_PART * far_spectrum;
      // Calculate absolute spectra
      abs_far_spectrum[i] = sqrtf(far_spectrum);

      near_spectrum = df[i][0] * df[i][0] + df[i][1] * df[i][1];
      aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum;
      // Calculate absolute spectra
      abs_near_spectrum[i] = sqrtf(near_spectrum);
    }

    // Estimate noise power. Wait until dPow is more stable.
    if (aec->noiseEstCtr > 50) {
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dPow[i] < aec->dMinPow[i]) {
                aec->dMinPow[i] = (aec->dPow[i] + step * (aec->dMinPow[i] -
                    aec->dPow[i])) * ramp;
            }
            else {
                aec->dMinPow[i] *= ramp;
            }
        }
    }

    // Smooth increasing noise power from zero at the start,
    // to avoid a sudden burst of comfort noise.
    if (aec->noiseEstCtr < noiseInitBlocks) {
        aec->noiseEstCtr++;
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dMinPow[i] > aec->dInitMinPow[i]) {
                aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] +
                    gInitNoise[1] * aec->dMinPow[i];
            }
            else {
                aec->dInitMinPow[i] = aec->dMinPow[i];
            }
        }
        aec->noisePow = aec->dInitMinPow;
    }
    else {
        aec->noisePow = aec->dMinPow;
    }

    // Update the xfBuf block position.
    aec->xfBufBlockPos_background--;
    if (aec->xfBufBlockPos_background == -1) {
        aec->xfBufBlockPos_background = NR_PART - 1;
    }

    // Buffer xf
    memcpy(aec->xfBuf_background[0] + aec->xfBufBlockPos_background * PART_LEN1, xf[0],
           sizeof(float) * PART_LEN1);
    memcpy(aec->xfBuf_background[1] + aec->xfBufBlockPos_background * PART_LEN1, xf[1],
           sizeof(float) * PART_LEN1);

    memset(yf[0], 0, sizeof(float) * (PART_LEN1 * 2));

    // Filter far
    FilterFar_background(aec, yf);

    // Inverse fft to obtain echo estimate and error.
    fft[0] = yf[0][0];
    fft[1] = yf[0][PART_LEN];
    for (i = 1; i < PART_LEN; i++) {
        fft[2 * i] = yf[0][i];
        fft[2 * i + 1] = yf[1][i];
    }
    aec_rdft_inverse_128(fft);

    scale = 2.0f / PART_LEN2;
    for (i = 0; i < PART_LEN; i++) {
        y[i] = fft[PART_LEN + i] * scale; // fft scaling
    }

    for (i = 0; i < PART_LEN; i++) {
        e[i] = d[i] - y[i];
    }
    // Error fft
    memcpy(aec->eBuf_background + PART_LEN, e, sizeof(float) * PART_LEN);
    memset(fft, 0, sizeof(float) * PART_LEN);
    memcpy(fft + PART_LEN, e, sizeof(float) * PART_LEN);
    aec_rdft_forward_128(fft);

    ef[1][0] = 0;
    ef[1][PART_LEN] = 0;
    ef[0][0] = fft[0];
    ef[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        ef[0][i] = fft[2 * i];
        ef[1][i] = fft[2 * i + 1];
    }


    // Scale error signal inversely with far power.
    WebRtcAec_ScaleErrorSignal(aec, ef);

    // Adapt the background filter only when there is sufficient power
    // at the probable echo delay.
    if (aec->framePowerAtProbableEchoDelay_shortTerm > 1000000) {
        FilterAdaptation_background(aec, fft, ef);
    }

    // Track the long-term power of the background filter output.
    aec->background_lt_filteredop_power =
        (float)(aec->background_lt_filteredop_power * 0.95 +
                0.05 * frame_power(aec->eBuf_background, PART_LEN2));
    for (i = 0; i < PART_LEN; i++) {
        output[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
            aec->eBuf_background[i], WEBRTC_SPL_WORD16_MIN);
    }

    // Copy the current block to the old position.
    memcpy(aec->xBuf_background, aec->xBuf_background + PART_LEN, sizeof(float) * PART_LEN);
    memcpy(aec->dBuf_background, aec->dBuf_background + PART_LEN, sizeof(float) * PART_LEN);
    memcpy(aec->eBuf_background, aec->eBuf_background + PART_LEN, sizeof(float) * PART_LEN);

}
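
The frame_power() helper called above is not shown in this snippet; a sum of squares over the buffer is consistent with how the long-term output power is then smoothed, so the sketch below uses that as a hypothetical stand-in:

#include <stdio.h>

/* Hypothetical stand-in for frame_power(): sum of squared samples. */
static float frame_power(const float *buf, int len) {
  float sum = 0.0f;
  int i;
  for (i = 0; i < len; i++)
    sum += buf[i] * buf[i];
  return sum;
}

int main(void) {
  float lt_power = 0.0f;
  const float frame[4] = {100.0f, -100.0f, 50.0f, -50.0f};
  int n;
  /* Long-term smoothing with the same 0.95/0.05 split as in
   * ProcessBlock_background; converges toward 25000 for this frame. */
  for (n = 0; n < 20; n++)
    lt_power = lt_power * 0.95f + 0.05f * frame_power(frame, 4);
  printf("%f\n", lt_power);
  return 0;
}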