Example No. 1
// Get address of region(s) from which we can read data.
// If the region is contiguous, |data_ptr_bytes_2| will be zero.
// If non-contiguous, |data_ptr_bytes_2| will be the size in bytes of the second
// region. Returns the number of elements available to read or
// |element_count|, whichever is smaller.
static size_t GetBufferReadRegions(RingBuffer* buf,
                                   size_t element_count,
                                   void** data_ptr_1,
                                   size_t* data_ptr_bytes_1,
                                   void** data_ptr_2,
                                   size_t* data_ptr_bytes_2) {

  const size_t readable_elements = WebRtc_available_read(buf);
  const size_t read_elements = (readable_elements < element_count ?
      readable_elements : element_count);
  const size_t margin = buf->element_count - buf->read_pos;

  // Check to see if read is not contiguous.
  if (read_elements > margin) {
    // The read wraps around the end of the buffer; return it as two blocks.
    *data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
    *data_ptr_bytes_1 = margin * buf->element_size;
    *data_ptr_2 = buf->data;
    *data_ptr_bytes_2 = (read_elements - margin) * buf->element_size;
  } else {
    *data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
    *data_ptr_bytes_1 = read_elements * buf->element_size;
    *data_ptr_2 = NULL;
    *data_ptr_bytes_2 = 0;
  }

  return read_elements;
}
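
As a usage illustration (not part of the original file), a caller inside the same translation unit could flatten the two regions as in the sketch below. The helper name CopyReadRegions and the assumption that <string.h> is included are hypothetical, and the sketch deliberately does not advance the read pointer.

static size_t CopyReadRegions(RingBuffer* buf, void* dest,
                              size_t element_count) {
  void* data_ptr_1 = NULL;
  void* data_ptr_2 = NULL;
  size_t bytes_1 = 0;
  size_t bytes_2 = 0;
  const size_t read = GetBufferReadRegions(buf, element_count, &data_ptr_1,
                                           &bytes_1, &data_ptr_2, &bytes_2);
  // First (possibly only) region.
  memcpy(dest, data_ptr_1, bytes_1);
  if (bytes_2 > 0) {
    // The read wrapped; the remainder sits at the start of the buffer.
    memcpy((char*) dest + bytes_1, data_ptr_2, bytes_2);
  }
  // A real reader would also move the read pointer by |read| elements,
  // e.g. via WebRtc_MoveReadPtr().
  return read;
}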
Example No. 2
size_t WebRtc_available_write(const RingBuffer* self) {
  if (!self) {
    return 0;
  }

  return self->element_count - WebRtc_available_read(self);
}
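
Since the writable count is defined as the capacity minus the readable count, the two accessors always sum to the buffer capacity. A minimal illustrative check, assuming <assert.h> is included and the buffer is non-NULL:

static void CheckCapacityInvariant(const RingBuffer* self) {
  // Illustration only: readable + writable elements equal the total capacity.
  assert(WebRtc_available_read(self) + WebRtc_available_write(self) ==
         self->element_count);
}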
Example No. 3
void WebRtcAec_ProcessFrame(aec_t *aec,
                            const short *nearend,
                            const short *nearendH,
                            int knownDelay)
{
    // For each frame the process is as follows:
    // 1) If the |system_delay| indicates that it is too small to process a
    //    frame, we stuff the buffer with enough data for 10 ms.
    // 2) Adjust the buffer to the system delay by moving the read pointer.
    // 3) If we can't move the read pointer due to buffer size limitations, we
    //    flush/stuff the buffer.
    // 4) Process as many partitions as possible.
    // 5) Update |system_delay| with respect to a full frame of FRAME_LEN
    //    samples. Even though we will have data left to process (we work with
    //    partitions), we update by a whole frame, since that's the amount of
    //    data we input and output in audio_processing.

    // TODO(bjornv): Investigate how we should round the delay difference;
    // right now we know that the incoming |knownDelay| is underestimated when
    // it's less than |aec->knownDelay|. We therefore round (-32) in that
    // direction. In the other direction we don't have this situation, but we
    // might flush one partition too little. This can cause non-causality,
    // which should be investigated. Maybe allow for non-symmetric rounding,
    // like -16.
    int move_elements = (aec->knownDelay - knownDelay - 32) / PART_LEN;
    int moved_elements = 0;

    // TODO(bjornv): Change the near-end buffer handling to be the same as for
    // far-end, that is, with a near_pre_buf.
    // Buffer the near-end frame.
    WebRtc_WriteBuffer(aec->nearFrBuf, nearend, FRAME_LEN);
    // For H band
    if (aec->sampFreq == 32000) {
        WebRtc_WriteBuffer(aec->nearFrBufH, nearendH, FRAME_LEN);
    }

    // 1) At most we process |aec->mult| + 1 partitions in 10 ms. Make sure we
    // have enough far-end data for that by stuffing the buffer if
    // |system_delay| indicates otherwise.
    if (aec->system_delay < FRAME_LEN) {
      // We don't have enough data so we rewind 10 ms.
      WebRtcAec_MoveFarReadPtr(aec, -(aec->mult + 1));
    }

    // 2) Compensate for a possible change in the system delay.
    WebRtc_MoveReadPtr(aec->far_buf_windowed, move_elements);
    moved_elements = WebRtc_MoveReadPtr(aec->far_buf, move_elements);
    aec->knownDelay -= moved_elements * PART_LEN;
#ifdef WEBRTC_AEC_DEBUG_DUMP
    WebRtc_MoveReadPtr(aec->far_time_buf, move_elements);
#endif

    // 4) Process as many blocks as possible.
    while (WebRtc_available_read(aec->nearFrBuf) >= PART_LEN) {
        ProcessBlock(aec);
    }

    // 5) Update system delay with respect to the entire frame.
    aec->system_delay -= FRAME_LEN;
}
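
To make the delay compensation in step 2 concrete, here is a worked example with assumed numbers (PART_LEN taken as 64):

// Worked example (values assumed): if the tracked delay aec->knownDelay is
// 320 samples and the newly reported knownDelay is 128 samples, then
//   move_elements = (320 - 128 - 32) / 64 = 2 partitions.
// If the far-end buffer can honor the move, moved_elements == 2 and the
// tracked delay becomes 320 - 2 * 64 = 192 samples.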
Example No. 4
// only buffer L band for farend
int32_t WebRtcAec_BufferFarend(void* aecInst,
                               const float* farend,
                               size_t nrOfSamples) {
  Aec* aecpc = aecInst;
  size_t newNrOfSamples = nrOfSamples;
  float new_farend[MAX_RESAMP_LEN];
  const float* farend_ptr = farend;

  // Get any error caused by buffering the farend signal.
  int32_t error_code = WebRtcAec_GetBufferFarendError(aecInst, farend,
                                                      nrOfSamples);

  if (error_code != 0)
    return error_code;


  if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
    // Resample and get a new number of samples
    WebRtcAec_ResampleLinear(aecpc->resampler,
                             farend,
                             nrOfSamples,
                             aecpc->skew,
                             new_farend,
                             &newNrOfSamples);
    farend_ptr = new_farend;
  }

  aecpc->farend_started = 1;
  WebRtcAec_SetSystemDelay(
      aecpc->aec, WebRtcAec_system_delay(aecpc->aec) + (int)newNrOfSamples);

  // Write the time-domain data to |far_pre_buf|.
  WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, newNrOfSamples);

  // Transform to frequency domain if we have enough data.
  while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) {
    // We have enough data to pass to the FFT, hence read PART_LEN2 samples.
    {
      float* ptmp = NULL;
      float tmp[PART_LEN2];
      WebRtc_ReadBuffer(aecpc->far_pre_buf, (void**)&ptmp, tmp, PART_LEN2);
      WebRtcAec_BufferFarendPartition(aecpc->aec, ptmp);
#ifdef WEBRTC_AEC_DEBUG_DUMP
      WebRtc_WriteBuffer(
          WebRtcAec_far_time_buf(aecpc->aec), &ptmp[PART_LEN], 1);
#endif
    }

    // Rewind |far_pre_buf| PART_LEN samples for overlap before continuing.
    WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN);
  }

  return 0;
}
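
The read-then-rewind pattern in the loop above is what produces 50% overlapping partitions for the FFT. A short illustration, assuming PART_LEN is 64 and PART_LEN2 is 128:

// Assumed constants: PART_LEN == 64, PART_LEN2 == 2 * PART_LEN == 128.
// Iteration 1 reads samples [0, 128) from |far_pre_buf|, then rewinds by 64.
// Iteration 2 reads samples [64, 192), and so on. Each partition handed to
// WebRtcAec_BufferFarendPartition() therefore overlaps the previous one by
// PART_LEN samples (50%), matching the windowed processing downstream.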
Example No. 5
int WebRtc_MoveReadPtr(RingBuffer* self, int element_count) {
  if (!self) {
    return 0;
  }

  {
    // We need to be able to take care of negative changes, hence use "int"
    // instead of "size_t".
    const int free_elements = (int) WebRtc_available_write(self);
    const int readable_elements = (int) WebRtc_available_read(self);
    int read_pos = (int) self->read_pos;

    if (element_count > readable_elements) {
      element_count = readable_elements;
    }
    if (element_count < -free_elements) {
      element_count = -free_elements;
    }

    read_pos += element_count;
    if (read_pos > (int) self->element_count) {
      // Buffer wrap around. Restart read position and wrap indicator.
      read_pos -= (int) self->element_count;
      self->rw_wrap = SAME_WRAP;
    }
    if (read_pos < 0) {
      // Buffer wrap around. Restart read position and wrap indicator.
      read_pos += (int) self->element_count;
      self->rw_wrap = DIFF_WRAP;
    }

    self->read_pos = (size_t) read_pos;

    return element_count;
  }
}
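
A brief hypothetical usage sketch: a move request is clamped to what the buffer can honor, and the return value reports how far the read pointer actually moved.

static void SkipStaleData(RingBuffer* rb) {
  // Hypothetical: try to skip 10 elements. If fewer are readable, the request
  // is clamped; |moved| tells the caller how far the pointer really moved.
  // Negative requests rewind and are clamped to -WebRtc_available_write(rb).
  const int moved = WebRtc_MoveReadPtr(rb, 10);
  (void) moved;
}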
Example No. 6
// only buffer L band for farend
int32_t WebRtcAec_BufferFarend(void *aecInst, const int16_t *farend,
                               int16_t nrOfSamples)
{
    aecpc_t *aecpc = aecInst;
    int32_t retVal = 0;
    int newNrOfSamples = (int) nrOfSamples;
    short newFarend[MAX_RESAMP_LEN];
    const int16_t* farend_ptr = farend;
    float tmp_farend[MAX_RESAMP_LEN];
    const float* farend_float = tmp_farend;
    float skew;
    int i = 0;

    if (aecpc == NULL) {
        return -1;
    }

    if (farend == NULL) {
        aecpc->lastError = AEC_NULL_POINTER_ERROR;
        return -1;
    }

    if (aecpc->initFlag != initCheck) {
        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
        return -1;
    }

    // number of samples == 160 for SWB input
    if (nrOfSamples != 80 && nrOfSamples != 160) {
        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
        return -1;
    }

    skew = aecpc->skew;

    if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
        // Resample and get a new number of samples
        WebRtcAec_ResampleLinear(aecpc->resampler, farend, nrOfSamples, skew,
                                 newFarend, &newNrOfSamples);
        farend_ptr = (const int16_t*) newFarend;
    }

    WebRtcAec_SetSystemDelay(aecpc->aec, WebRtcAec_system_delay(aecpc->aec) +
                             newNrOfSamples);

#ifdef WEBRTC_AEC_DEBUG_DUMP
    WebRtc_WriteBuffer(aecpc->far_pre_buf_s16, farend_ptr,
                       (size_t) newNrOfSamples);
#endif
    // Cast to float and write the time-domain data to |far_pre_buf|.
    for (i = 0; i < newNrOfSamples; i++) {
      tmp_farend[i] = (float) farend_ptr[i];
    }
    WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_float,
                       (size_t) newNrOfSamples);

    // Transform to frequency domain if we have enough data.
    while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) {
      // We have enough data to pass to the FFT, hence read PART_LEN2 samples.
      WebRtc_ReadBuffer(aecpc->far_pre_buf, (void**) &farend_float, tmp_farend,
                        PART_LEN2);

      WebRtcAec_BufferFarendPartition(aecpc->aec, farend_float);

      // Rewind |far_pre_buf| PART_LEN samples for overlap before continuing.
      WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN);
#ifdef WEBRTC_AEC_DEBUG_DUMP
      WebRtc_ReadBuffer(aecpc->far_pre_buf_s16, (void**) &farend_ptr, newFarend,
                        PART_LEN2);
      WebRtc_WriteBuffer(WebRtcAec_far_time_buf(aecpc->aec),
                         &farend_ptr[PART_LEN], 1);
      WebRtc_MoveReadPtr(aecpc->far_pre_buf_s16, -PART_LEN);
#endif
    }

    return retVal;
}
Example No. 7
WebRtc_Word32 WebRtcAec_Process(void *aecInst, const WebRtc_Word16 *nearend,
    const WebRtc_Word16 *nearendH, WebRtc_Word16 *out, WebRtc_Word16 *outH,
    WebRtc_Word16 nrOfSamples, WebRtc_Word16 msInSndCardBuf, WebRtc_Word32 skew)
{
    aecpc_t *aecpc = aecInst;
    WebRtc_Word32 retVal = 0;
    short i;
    short nBlocks10ms;
    short nFrames;
    // Limit resampling to doubling/halving of signal
    const float minSkewEst = -0.5f;
    const float maxSkewEst = 1.0f;

    if (aecpc == NULL) {
        return -1;
    }

    if (nearend == NULL) {
        aecpc->lastError = AEC_NULL_POINTER_ERROR;
        return -1;
    }

    if (out == NULL) {
        aecpc->lastError = AEC_NULL_POINTER_ERROR;
        return -1;
    }

    if (aecpc->initFlag != initCheck) {
        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
        return -1;
    }

    // number of samples == 160 for SWB input
    if (nrOfSamples != 80 && nrOfSamples != 160) {
        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
        return -1;
    }

    // Check for valid pointers based on sampling rate
    if (aecpc->sampFreq == 32000 && nearendH == NULL) {
       aecpc->lastError = AEC_NULL_POINTER_ERROR;
       return -1;
    }

    if (msInSndCardBuf < 0) {
        msInSndCardBuf = 0;
        aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
        retVal = -1;
    }
    else if (msInSndCardBuf > 500) {
        msInSndCardBuf = 500;
        aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
        retVal = -1;
    }
    // TODO(andrew): we need to investigate if this +10 is really wanted.
    msInSndCardBuf += 10;
    aecpc->msInSndCardBuf = msInSndCardBuf;

    if (aecpc->skewMode == kAecTrue) {
        if (aecpc->skewFrCtr < 25) {
            aecpc->skewFrCtr++;
        }
        else {
            retVal = WebRtcAec_GetSkew(aecpc->resampler, skew, &aecpc->skew);
            if (retVal == -1) {
                aecpc->skew = 0;
                aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
            }

            aecpc->skew /= aecpc->sampFactor*nrOfSamples;

            if (aecpc->skew < 1.0e-3 && aecpc->skew > -1.0e-3) {
                aecpc->resample = kAecFalse;
            }
            else {
                aecpc->resample = kAecTrue;
            }

            if (aecpc->skew < minSkewEst) {
                aecpc->skew = minSkewEst;
            }
            else if (aecpc->skew > maxSkewEst) {
                aecpc->skew = maxSkewEst;
            }

#ifdef WEBRTC_AEC_DEBUG_DUMP
            fwrite(&aecpc->skew, sizeof(aecpc->skew), 1, aecpc->skewFile);
#endif
        }
    }

    nFrames = nrOfSamples / FRAME_LEN;
    nBlocks10ms = nFrames / aecpc->aec->mult;

    if (aecpc->ECstartup) {
        if (nearend != out) {
            // Only needed if they don't already point to the same place.
            memcpy(out, nearend, sizeof(short) * nrOfSamples);
        }

        // The AEC is in start-up mode.
        // The AEC is disabled until the system delay is OK.

        // Mechanism to ensure that the system delay is reasonably stable.
        if (aecpc->checkBuffSize) {
            aecpc->checkBufSizeCtr++;
            // Before we fill up the far-end buffer we require the system delay
            // to be stable (+/-8 ms) compared to the first value. This
            // comparison is made during the following 6 consecutive 10 ms
            // blocks. If it seems to be stable then we start to fill up the
            // far-end buffer.
            if (aecpc->counter == 0) {
                aecpc->firstVal = aecpc->msInSndCardBuf;
                aecpc->sum = 0;
            }

            if (abs(aecpc->firstVal - aecpc->msInSndCardBuf) <
                WEBRTC_SPL_MAX(0.2 * aecpc->msInSndCardBuf, sampMsNb)) {
                aecpc->sum += aecpc->msInSndCardBuf;
                aecpc->counter++;
            }
            else {
                aecpc->counter = 0;
            }

            if (aecpc->counter * nBlocks10ms >= 6) {
                // The far-end buffer size is determined in partitions of
                // PART_LEN samples. Use 75% of the average value of the system
                // delay as buffer size to start with.
                aecpc->bufSizeStart = WEBRTC_SPL_MIN((3 * aecpc->sum *
                  aecpc->aec->mult * 8) / (4 * aecpc->counter * PART_LEN),
                  kMaxBufSizeStart);
                // Buffer size has now been determined.
                aecpc->checkBuffSize = 0;
            }

            if (aecpc->checkBufSizeCtr * nBlocks10ms > 50) {
                // For really bad systems, don't disable the echo canceller for
                // more than 0.5 sec.
                aecpc->bufSizeStart = WEBRTC_SPL_MIN((aecpc->msInSndCardBuf *
                    aecpc->aec->mult * 3) / 40, kMaxBufSizeStart);
                aecpc->checkBuffSize = 0;
            }
        }

        // If |checkBuffSize| changed in the if-statement above.
        if (!aecpc->checkBuffSize) {
            // The system delay is now reasonably stable (or has been unstable
            // for too long). When the far-end buffer is filled with
            // approximately the same amount of data as reported by the system
            // we end the startup phase.
            int overhead_elements = aecpc->aec->system_delay / PART_LEN -
                aecpc->bufSizeStart;
            if (overhead_elements == 0) {
                // Enable the AEC
                aecpc->ECstartup = 0;
            } else if (overhead_elements > 0) {
                // TODO(bjornv): Do we need a check on how much we actually
                // moved the read pointer? It should always be possible to move
                // the pointer |overhead_elements| since we have only added data
                // to the buffer and no delay compensation nor AEC processing
                // has been done.
                WebRtcAec_MoveFarReadPtr(aecpc->aec, overhead_elements);

                // Enable the AEC
                aecpc->ECstartup = 0;
            }
        }
    } else {
        // AEC is enabled.

        int out_elements = 0;

        EstBufDelay(aecpc);

        // Note that 1 frame is supported for NB and 2 frames for WB.
        for (i = 0; i < nFrames; i++) {
            int16_t* out_ptr = NULL;
            int16_t out_tmp[FRAME_LEN];

            // Call the AEC.
            WebRtcAec_ProcessFrame(aecpc->aec,
                                   &nearend[FRAME_LEN * i],
                                   &nearendH[FRAME_LEN * i],
                                   aecpc->knownDelay);
            // TODO(bjornv): Re-structure such that we don't have to pass
            // |aecpc->knownDelay| as input. Change name to something like
            // |system_buffer_diff|.

            // Stuff the out buffer if we have less than a frame to output.
            // This should only happen for the first frame.
            out_elements = (int) WebRtc_available_read(aecpc->aec->outFrBuf);
            if (out_elements < FRAME_LEN) {
                WebRtc_MoveReadPtr(aecpc->aec->outFrBuf,
                                   out_elements - FRAME_LEN);
                if (aecpc->sampFreq == 32000) {
                    WebRtc_MoveReadPtr(aecpc->aec->outFrBufH,
                                       out_elements - FRAME_LEN);
                }
            }

            // Obtain an output frame.
            WebRtc_ReadBuffer(aecpc->aec->outFrBuf, (void**) &out_ptr,
                              out_tmp, FRAME_LEN);
            memcpy(&out[FRAME_LEN * i], out_ptr, sizeof(int16_t) * FRAME_LEN);
            // For H band
            if (aecpc->sampFreq == 32000) {
                WebRtc_ReadBuffer(aecpc->aec->outFrBufH, (void**) &out_ptr,
                                  out_tmp, FRAME_LEN);
                memcpy(&outH[FRAME_LEN * i], out_ptr,
                       sizeof(int16_t) * FRAME_LEN);
            }
        }
    }

#ifdef WEBRTC_AEC_DEBUG_DUMP
    {
        int16_t far_buf_size_ms = (int16_t) (aecpc->aec->system_delay /
            (sampMsNb * aecpc->aec->mult));
        fwrite(&far_buf_size_ms, 2, 1, aecpc->bufFile);
        fwrite(&(aecpc->knownDelay), sizeof(aecpc->knownDelay), 1,
               aecpc->delayFile);
    }
#endif

    return retVal;
}
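
To make the start-up buffer sizing concrete, a worked example with assumed numbers (PART_LEN taken as 64, sampMsNb as 8 samples per ms):

// Worked example (values assumed): six accepted 10 ms blocks whose reported
// delays sum to 360 ms (average 60 ms), wideband processing
// (aecpc->aec->mult == 2, i.e. 16 samples per ms at the split rate):
//   bufSizeStart = (3 * 360 * 2 * 8) / (4 * 6 * 64) = 11 partitions,
// roughly 75% of the 60 ms average delay expressed in PART_LEN-sample
// partitions, capped at kMaxBufSizeStart.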
Example No. 8
// only buffer L band for farend
int32_t WebRtcAec_BufferFarend(void* aecInst,
                               const float* farend,
                               int16_t nrOfSamples) {
  Aec* aecpc = aecInst;
  int newNrOfSamples = (int)nrOfSamples;
  float new_farend[MAX_RESAMP_LEN];
  const float* farend_ptr = farend;

  if (farend == NULL) {
    aecpc->lastError = AEC_NULL_POINTER_ERROR;
    return -1;
  }

  if (aecpc->initFlag != initCheck) {
    aecpc->lastError = AEC_UNINITIALIZED_ERROR;
    return -1;
  }

  // number of samples == 160 for SWB input
  if (nrOfSamples != 80 && nrOfSamples != 160) {
    aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
    return -1;
  }

  if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
    // Resample and get a new number of samples
    WebRtcAec_ResampleLinear(aecpc->resampler,
                             farend,
                             nrOfSamples,
                             aecpc->skew,
                             new_farend,
                             &newNrOfSamples);
    farend_ptr = new_farend;
  }

  aecpc->farend_started = 1;
  WebRtcAec_SetSystemDelay(aecpc->aec,
                           WebRtcAec_system_delay(aecpc->aec) + newNrOfSamples);

  // Write the time-domain data to |far_pre_buf|.
  WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, (size_t)newNrOfSamples);

  // Transform to frequency domain if we have enough data.
  while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) {
    // We have enough data to pass to the FFT, hence read PART_LEN2 samples.
    {
      float* ptmp = NULL;
      float tmp[PART_LEN2];
      WebRtc_ReadBuffer(aecpc->far_pre_buf, (void**)&ptmp, tmp, PART_LEN2);
      WebRtcAec_BufferFarendPartition(aecpc->aec, ptmp);
#ifdef WEBRTC_AEC_DEBUG_DUMP
      WebRtc_WriteBuffer(
          WebRtcAec_far_time_buf(aecpc->aec), &ptmp[PART_LEN], 1);
#endif
    }

    // Rewind |far_pre_buf| PART_LEN samples for overlap before continuing.
    WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN);
  }

  return 0;
}
Example No. 9
static void NonLinearProcessing(aec_t *aec, short *output, short *outputH)
{
    float efw[2][PART_LEN1], dfw[2][PART_LEN1], xfw[2][PART_LEN1];
    complex_t comfortNoiseHband[PART_LEN1];
    float fft[PART_LEN2];
    float scale, dtmp;
    float nlpGainHband;
    int i, j, pos;

    // Coherence and non-linear filter
    float cohde[PART_LEN1], cohxd[PART_LEN1];
    float hNlDeAvg, hNlXdAvg;
    float hNl[PART_LEN1];
    float hNlPref[PREF_BAND_SIZE];
    float hNlFb = 0, hNlFbLow = 0;
    const float prefBandQuant = 0.75f, prefBandQuantLow = 0.5f;
    const int prefBandSize = PREF_BAND_SIZE / aec->mult;
    const int minPrefBand = 4 / aec->mult;

    // Near and error power sums
    float sdSum = 0, seSum = 0;

    // Power estimate smoothing coefficients
    const float gCoh[2][2] = {{0.9f, 0.1f}, {0.93f, 0.07f}};
    const float *ptrGCoh = gCoh[aec->mult - 1];

    // Filter energy
    float wfEnMax = 0, wfEn = 0;
    const int delayEstInterval = 10 * aec->mult;

    float* xfw_ptr = NULL;

    aec->delayEstCtr++;
    if (aec->delayEstCtr == delayEstInterval) {
        aec->delayEstCtr = 0;
    }

    // initialize comfort noise for H band
    memset(comfortNoiseHband, 0, sizeof(comfortNoiseHband));
    nlpGainHband = (float)0.0;
    dtmp = (float)0.0;

    // Measure energy in each filter partition to determine delay.
    // TODO: Spread by computing one partition per block?
    if (aec->delayEstCtr == 0) {
        wfEnMax = 0;
        aec->delayIdx = 0;
        for (i = 0; i < NR_PART; i++) {
            pos = i * PART_LEN1;
            wfEn = 0;
            for (j = 0; j < PART_LEN1; j++) {
                wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
                    aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
            }

            if (wfEn > wfEnMax) {
                wfEnMax = wfEn;
                aec->delayIdx = i;
            }
        }
    }

    // We should always have at least one element stored in |far_buf_windowed|.
    assert(WebRtc_available_read(aec->far_buf_windowed) > 0);
    // NLP
    WebRtc_ReadBuffer(aec->far_buf_windowed, (void**) &xfw_ptr, &xfw[0][0], 1);

    // TODO(bjornv): Investigate if we can reuse |far_buf_windowed| instead of
    // |xfwBuf|.
    // Buffer far.
    memcpy(aec->xfwBuf, xfw_ptr, sizeof(float) * 2 * PART_LEN1);

    // Use delayed far.
    memcpy(xfw, aec->xfwBuf + aec->delayIdx * PART_LEN1, sizeof(xfw));

    // Windowed near fft
    for (i = 0; i < PART_LEN; i++) {
        fft[i] = aec->dBuf[i] * sqrtHanning[i];
        fft[PART_LEN + i] = aec->dBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }
    aec_rdft_forward_128(fft);

    dfw[1][0] = 0;
    dfw[1][PART_LEN] = 0;
    dfw[0][0] = fft[0];
    dfw[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        dfw[0][i] = fft[2 * i];
        dfw[1][i] = fft[2 * i + 1];
    }

    // Windowed error fft
    for (i = 0; i < PART_LEN; i++) {
        fft[i] = aec->eBuf[i] * sqrtHanning[i];
        fft[PART_LEN + i] = aec->eBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }
    aec_rdft_forward_128(fft);
    efw[1][0] = 0;
    efw[1][PART_LEN] = 0;
    efw[0][0] = fft[0];
    efw[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        efw[0][i] = fft[2 * i];
        efw[1][i] = fft[2 * i + 1];
    }

    // Smoothed PSD
    for (i = 0; i < PART_LEN1; i++) {
        aec->sd[i] = ptrGCoh[0] * aec->sd[i] + ptrGCoh[1] *
            (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
        aec->se[i] = ptrGCoh[0] * aec->se[i] + ptrGCoh[1] *
            (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
        // We threshold here to protect against the ill-effects of a zero farend.
        // The threshold is not arbitrarily chosen, but balances protection and
        // adverse interaction with the algorithm's tuning.
        // TODO: investigate further why this is so sensitive.
        aec->sx[i] = ptrGCoh[0] * aec->sx[i] + ptrGCoh[1] *
            WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], 15);

        aec->sde[i][0] = ptrGCoh[0] * aec->sde[i][0] + ptrGCoh[1] *
            (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
        aec->sde[i][1] = ptrGCoh[0] * aec->sde[i][1] + ptrGCoh[1] *
            (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);

        aec->sxd[i][0] = ptrGCoh[0] * aec->sxd[i][0] + ptrGCoh[1] *
            (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
        aec->sxd[i][1] = ptrGCoh[0] * aec->sxd[i][1] + ptrGCoh[1] *
            (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);

        sdSum += aec->sd[i];
        seSum += aec->se[i];
    }

    // Divergent filter safeguard.
    if (aec->divergeState == 0) {
        if (seSum > sdSum) {
            aec->divergeState = 1;
        }
    }
    else {
        if (seSum * 1.05f < sdSum) {
            aec->divergeState = 0;
        }
    }

    if (aec->divergeState == 1) {
        memcpy(efw, dfw, sizeof(efw));
    }

    // Reset if error is significantly larger than nearend (13 dB).
    if (seSum > (19.95f * sdSum)) {
        memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
    }

    // Subband coherence
    for (i = 0; i < PART_LEN1; i++) {
        cohde[i] = (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
            (aec->sd[i] * aec->se[i] + 1e-10f);
        cohxd[i] = (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
            (aec->sx[i] * aec->sd[i] + 1e-10f);
    }

    hNlXdAvg = 0;
    for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
        hNlXdAvg += cohxd[i];
    }
    hNlXdAvg /= prefBandSize;
    hNlXdAvg = 1 - hNlXdAvg;

    hNlDeAvg = 0;
    for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
        hNlDeAvg += cohde[i];
    }
    hNlDeAvg /= prefBandSize;

    if (hNlXdAvg < 0.75f && hNlXdAvg < aec->hNlXdAvgMin) {
        aec->hNlXdAvgMin = hNlXdAvg;
    }

    if (hNlDeAvg > 0.98f && hNlXdAvg > 0.9f) {
        aec->stNearState = 1;
    }
    else if (hNlDeAvg < 0.95f || hNlXdAvg < 0.8f) {
        aec->stNearState = 0;
    }

    if (aec->hNlXdAvgMin == 1) {
        aec->echoState = 0;
        aec->overDrive = aec->minOverDrive;

        if (aec->stNearState == 1) {
            memcpy(hNl, cohde, sizeof(hNl));
            hNlFb = hNlDeAvg;
            hNlFbLow = hNlDeAvg;
        }
        else {
            for (i = 0; i < PART_LEN1; i++) {
                hNl[i] = 1 - cohxd[i];
            }
            hNlFb = hNlXdAvg;
            hNlFbLow = hNlXdAvg;
        }
    }
    else {

        if (aec->stNearState == 1) {
            aec->echoState = 0;
            memcpy(hNl, cohde, sizeof(hNl));
            hNlFb = hNlDeAvg;
            hNlFbLow = hNlDeAvg;
        }
        else {
            aec->echoState = 1;
            for (i = 0; i < PART_LEN1; i++) {
                hNl[i] = WEBRTC_SPL_MIN(cohde[i], 1 - cohxd[i]);
            }

            // Select an order statistic from the preferred bands.
            // TODO: Using quicksort now, but a selection algorithm may be preferred.
            memcpy(hNlPref, &hNl[minPrefBand], sizeof(float) * prefBandSize);
            qsort(hNlPref, prefBandSize, sizeof(float), CmpFloat);
            hNlFb = hNlPref[(int)floor(prefBandQuant * (prefBandSize - 1))];
            hNlFbLow = hNlPref[(int)floor(prefBandQuantLow * (prefBandSize - 1))];
        }
    }

    // Track the local filter minimum to determine suppression overdrive.
    if (hNlFbLow < 0.6f && hNlFbLow < aec->hNlFbLocalMin) {
        aec->hNlFbLocalMin = hNlFbLow;
        aec->hNlFbMin = hNlFbLow;
        aec->hNlNewMin = 1;
        aec->hNlMinCtr = 0;
    }
    aec->hNlFbLocalMin = WEBRTC_SPL_MIN(aec->hNlFbLocalMin + 0.0008f / aec->mult, 1);
    aec->hNlXdAvgMin = WEBRTC_SPL_MIN(aec->hNlXdAvgMin + 0.0006f / aec->mult, 1);

    if (aec->hNlNewMin == 1) {
        aec->hNlMinCtr++;
    }
    if (aec->hNlMinCtr == 2) {
        aec->hNlNewMin = 0;
        aec->hNlMinCtr = 0;
        aec->overDrive = WEBRTC_SPL_MAX(aec->targetSupp /
            ((float)log(aec->hNlFbMin + 1e-10f) + 1e-10f), aec->minOverDrive);
    }

    // Smooth the overdrive.
    if (aec->overDrive < aec->overDriveSm) {
      aec->overDriveSm = 0.99f * aec->overDriveSm + 0.01f * aec->overDrive;
    }
    else {
      aec->overDriveSm = 0.9f * aec->overDriveSm + 0.1f * aec->overDrive;
    }

    WebRtcAec_OverdriveAndSuppress(aec, hNl, hNlFb, efw);

    // Add comfort noise.
    ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl);

    // TODO(bjornv): Investigate how to take the windowing below into account if
    // needed.
    if (aec->metricsMode == 1) {
      // Note that we have a scaling by two in the time domain |eBuf|.
      // In addition the time domain signal is windowed before transformation,
      // losing half the energy on the average. We take care of the first
      // scaling only in UpdateMetrics().
      UpdateLevel(&aec->nlpoutlevel, efw);
    }
    // Inverse error fft.
    fft[0] = efw[0][0];
    fft[1] = efw[0][PART_LEN];
    for (i = 1; i < PART_LEN; i++) {
        fft[2*i] = efw[0][i];
        // Sign change required by Ooura fft.
        fft[2*i + 1] = -efw[1][i];
    }
    aec_rdft_inverse_128(fft);

    // Overlap and add to obtain output.
    scale = 2.0f / PART_LEN2;
    for (i = 0; i < PART_LEN; i++) {
        fft[i] *= scale; // fft scaling
        fft[i] = fft[i]*sqrtHanning[i] + aec->outBuf[i];

        // Saturation protection
        output[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, fft[i],
            WEBRTC_SPL_WORD16_MIN);

        fft[PART_LEN + i] *= scale; // fft scaling
        aec->outBuf[i] = fft[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }

    // For H band
    if (aec->sampFreq == 32000) {

        // H band gain
        // average nlp over low band: average over second half of freq spectrum
        // (4->8khz)
        GetHighbandGain(hNl, &nlpGainHband);

        // Inverse comfort_noise
        if (flagHbandCn == 1) {
            fft[0] = comfortNoiseHband[0][0];
            fft[1] = comfortNoiseHband[PART_LEN][0];
            for (i = 1; i < PART_LEN; i++) {
                fft[2*i] = comfortNoiseHband[i][0];
                fft[2*i + 1] = comfortNoiseHband[i][1];
            }
            aec_rdft_inverse_128(fft);
            scale = 2.0f / PART_LEN2;
        }

        // compute gain factor
        for (i = 0; i < PART_LEN; i++) {
            dtmp = (float)aec->dBufH[i];
            dtmp = (float)dtmp * nlpGainHband; // for variable gain

            // add some comfort noise where Hband is attenuated
            if (flagHbandCn == 1) {
                fft[i] *= scale; // fft scaling
                dtmp += cnScaleHband * fft[i];
            }

            // Saturation protection
            outputH[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, dtmp,
                WEBRTC_SPL_WORD16_MIN);
        }
    }

    // Copy the current block to the old position.
    memcpy(aec->dBuf, aec->dBuf + PART_LEN, sizeof(float) * PART_LEN);
    memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN);

    // Copy the current block to the old position for H band
    if (aec->sampFreq == 32000) {
        memcpy(aec->dBufH, aec->dBufH + PART_LEN, sizeof(float) * PART_LEN);
    }

    memmove(aec->xfwBuf + PART_LEN1, aec->xfwBuf, sizeof(aec->xfwBuf) -
        sizeof(complex_t) * PART_LEN1);
}
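
For reference, the subband quantities that drive the suppression above can be summarized per frequency bin (a restatement of the code, not new behavior):

// Per bin k, using the smoothed auto-/cross-spectra kept in the AEC state:
//   cohde[k] = |sde[k]|^2 / (sd[k] * se[k] + 1e-10)   near end vs. error
//   cohxd[k] = |sxd[k]|^2 / (sx[k] * sd[k] + 1e-10)   far end vs. near end
// High cohde means the error still resembles the near end (near-end talk);
// high cohxd means the near end resembles the far end (echo). The NLP gain
// hNl[k] is then taken from cohde, 1 - cohxd, or their minimum, depending on
// the detected state, and order statistics of hNl over the preferred band
// give the feedback values hNlFb and hNlFbLow.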
Example No. 10
static void ProcessBlock(aec_t* aec) {
    int i;
    float d[PART_LEN], y[PART_LEN], e[PART_LEN], dH[PART_LEN];
    float scale;

    float fft[PART_LEN2];
    float xf[2][PART_LEN1], yf[2][PART_LEN1], ef[2][PART_LEN1];
    float df[2][PART_LEN1];
    float far_spectrum = 0.0f;
    float near_spectrum = 0.0f;
    float abs_far_spectrum[PART_LEN1];
    float abs_near_spectrum[PART_LEN1];

    const float gPow[2] = {0.9f, 0.1f};

    // Noise estimate constants.
    const int noiseInitBlocks = 500 * aec->mult;
    const float step = 0.1f;
    const float ramp = 1.0002f;
    const float gInitNoise[2] = {0.999f, 0.001f};

    int16_t nearend[PART_LEN];
    int16_t* nearend_ptr = NULL;
    int16_t output[PART_LEN];
    int16_t outputH[PART_LEN];

    float* xf_ptr = NULL;

    memset(dH, 0, sizeof(dH));
    if (aec->sampFreq == 32000) {
      // Get the upper band first so we can reuse |nearend|.
      WebRtc_ReadBuffer(aec->nearFrBufH,
                        (void**) &nearend_ptr,
                        nearend,
                        PART_LEN);
      for (i = 0; i < PART_LEN; i++) {
          dH[i] = (float) (nearend_ptr[i]);
      }
      memcpy(aec->dBufH + PART_LEN, dH, sizeof(float) * PART_LEN);
    }
    WebRtc_ReadBuffer(aec->nearFrBuf, (void**) &nearend_ptr, nearend, PART_LEN);

    // ---------- Ooura fft ----------
    // Concatenate old and new nearend blocks.
    for (i = 0; i < PART_LEN; i++) {
        d[i] = (float) (nearend_ptr[i]);
    }
    memcpy(aec->dBuf + PART_LEN, d, sizeof(float) * PART_LEN);

#ifdef WEBRTC_AEC_DEBUG_DUMP
    {
        int16_t farend[PART_LEN];
        int16_t* farend_ptr = NULL;
        WebRtc_ReadBuffer(aec->far_time_buf, (void**) &farend_ptr, farend, 1);
        (void)fwrite(farend_ptr, sizeof(int16_t), PART_LEN, aec->farFile);
        (void)fwrite(nearend_ptr, sizeof(int16_t), PART_LEN, aec->nearFile);
    }
#endif

    // We should always have at least one element stored in |far_buf|.
    assert(WebRtc_available_read(aec->far_buf) > 0);
    WebRtc_ReadBuffer(aec->far_buf, (void**) &xf_ptr, &xf[0][0], 1);

    // Near fft
    memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2);
    TimeToFrequency(fft, df, 0);

    // Power smoothing
    for (i = 0; i < PART_LEN1; i++) {
      far_spectrum = (xf_ptr[i] * xf_ptr[i]) +
          (xf_ptr[PART_LEN1 + i] * xf_ptr[PART_LEN1 + i]);
      aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * NR_PART * far_spectrum;
      // Calculate absolute spectra
      abs_far_spectrum[i] = sqrtf(far_spectrum);

      near_spectrum = df[0][i] * df[0][i] + df[1][i] * df[1][i];
      aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum;
      // Calculate absolute spectra
      abs_near_spectrum[i] = sqrtf(near_spectrum);
    }

    // Estimate noise power. Wait until dPow is more stable.
    if (aec->noiseEstCtr > 50) {
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dPow[i] < aec->dMinPow[i]) {
                aec->dMinPow[i] = (aec->dPow[i] + step * (aec->dMinPow[i] -
                    aec->dPow[i])) * ramp;
            }
            else {
                aec->dMinPow[i] *= ramp;
            }
        }
    }

    // Smooth increasing noise power from zero at the start,
    // to avoid a sudden burst of comfort noise.
    if (aec->noiseEstCtr < noiseInitBlocks) {
        aec->noiseEstCtr++;
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dMinPow[i] > aec->dInitMinPow[i]) {
                aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] +
                    gInitNoise[1] * aec->dMinPow[i];
            }
            else {
                aec->dInitMinPow[i] = aec->dMinPow[i];
            }
        }
        aec->noisePow = aec->dInitMinPow;
    }
    else {
        aec->noisePow = aec->dMinPow;
    }

    // Block-wise delay estimation used for logging.
    if (aec->delay_logging_enabled) {
      int delay_estimate = 0;
      // Estimate the delay
      delay_estimate = WebRtc_DelayEstimatorProcessFloat(aec->delay_estimator,
                                                         abs_far_spectrum,
                                                         abs_near_spectrum,
                                                         PART_LEN1);
      if (delay_estimate >= 0) {
        // Update delay estimate buffer.
        aec->delay_histogram[delay_estimate]++;
      }
    }

    // Update the xfBuf block position.
    aec->xfBufBlockPos--;
    if (aec->xfBufBlockPos == -1) {
        aec->xfBufBlockPos = NR_PART - 1;
    }

    // Buffer xf
    memcpy(aec->xfBuf[0] + aec->xfBufBlockPos * PART_LEN1, xf_ptr,
           sizeof(float) * PART_LEN1);
    memcpy(aec->xfBuf[1] + aec->xfBufBlockPos * PART_LEN1, &xf_ptr[PART_LEN1],
           sizeof(float) * PART_LEN1);

    memset(yf, 0, sizeof(yf));

    // Filter far
    WebRtcAec_FilterFar(aec, yf);

    // Inverse fft to obtain echo estimate and error.
    fft[0] = yf[0][0];
    fft[1] = yf[0][PART_LEN];
    for (i = 1; i < PART_LEN; i++) {
        fft[2 * i] = yf[0][i];
        fft[2 * i + 1] = yf[1][i];
    }
    aec_rdft_inverse_128(fft);

    scale = 2.0f / PART_LEN2;
    for (i = 0; i < PART_LEN; i++) {
        y[i] = fft[PART_LEN + i] * scale; // fft scaling
    }

    for (i = 0; i < PART_LEN; i++) {
        e[i] = d[i] - y[i];
    }

    // Error fft
    memcpy(aec->eBuf + PART_LEN, e, sizeof(float) * PART_LEN);
    memset(fft, 0, sizeof(float) * PART_LEN);
    memcpy(fft + PART_LEN, e, sizeof(float) * PART_LEN);
    // TODO(bjornv): Change to use TimeToFrequency().
    aec_rdft_forward_128(fft);

    ef[1][0] = 0;
    ef[1][PART_LEN] = 0;
    ef[0][0] = fft[0];
    ef[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        ef[0][i] = fft[2 * i];
        ef[1][i] = fft[2 * i + 1];
    }

    if (aec->metricsMode == 1) {
      // Note that the first PART_LEN samples in fft (before transformation) are
      // zero. Hence, the scaling by two in UpdateLevel() should not be
      // performed. That scaling is taken care of in UpdateMetrics() instead.
      UpdateLevel(&aec->linoutlevel, ef);
    }

    // Scale error signal inversely with far power.
    WebRtcAec_ScaleErrorSignal(aec, ef);
    WebRtcAec_FilterAdaptation(aec, fft, ef);
    NonLinearProcessing(aec, output, outputH);

    if (aec->metricsMode == 1) {
        // Update power levels and echo metrics
        UpdateLevel(&aec->farlevel, (float (*)[PART_LEN1]) xf_ptr);
        UpdateLevel(&aec->nearlevel, df);
        UpdateMetrics(aec);
    }

    // Store the output block.
    WebRtc_WriteBuffer(aec->outFrBuf, output, PART_LEN);
    // For H band
    if (aec->sampFreq == 32000) {
        WebRtc_WriteBuffer(aec->outFrBufH, outputH, PART_LEN);
    }

#ifdef WEBRTC_AEC_DEBUG_DUMP
    {
        int16_t eInt16[PART_LEN];
        for (i = 0; i < PART_LEN; i++) {
            eInt16[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
                WEBRTC_SPL_WORD16_MIN);
        }

        (void)fwrite(eInt16, sizeof(int16_t), PART_LEN, aec->outLinearFile);
        (void)fwrite(output, sizeof(int16_t), PART_LEN, aec->outFile);
    }
#endif
}
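
The delay histogram is only updated in this function; a hypothetical consumer could summarize it as in the sketch below. The helper name, its parameters, and the 4 ms-per-block figure (64 samples at 16 kHz) are assumptions for illustration, not part of this file.

static int MostLikelyDelayMs(const int* delay_histogram,
                             int history_size,
                             int ms_per_block) {
  // Pick the most frequent block delay and convert it to milliseconds.
  int best_bin = 0;
  int i;
  for (i = 1; i < history_size; i++) {
    if (delay_histogram[i] > delay_histogram[best_bin]) {
      best_bin = i;
    }
  }
  return best_bin * ms_per_block;
}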