Example #1
/* Compute the amplitude (sqrt energy) in each of the bands */
void compute_band_energies(const CELTMode *m, const celt_sig *X, celt_ener *bandE, int end, int C, int M)
{
   int i, c, N;
   const opus_int16 *eBands = m->eBands;
   N = M*m->shortMdctSize;
   c=0; do {
      for (i=0;i<end;i++)
      {
         int j;
         opus_val32 maxval=0;
         opus_val32 sum = 0;

         j=M*eBands[i]; do {
            maxval = MAX32(maxval, X[j+c*N]);
            maxval = MAX32(maxval, -X[j+c*N]);
         } while (++j<M*eBands[i+1]);

         if (maxval > 0)
         {
            int shift = celt_ilog2(maxval)-10;
            j=M*eBands[i]; do {
               sum = MAC16_16(sum, EXTRACT16(VSHR32(X[j+c*N],shift)),
                                   EXTRACT16(VSHR32(X[j+c*N],shift)));
            } while (++j<M*eBands[i+1]);
            /* We're adding one here to make damn sure we never end up with a pitch vector that's
               larger than unity norm */
            bandE[i+c*m->nbEBands] = EPSILON+VSHR32(EXTEND32(celt_sqrt(sum)),-shift);
         } else {
            bandE[i+c*m->nbEBands] = EPSILON;
         }
         /*printf ("%f ", bandE[i+c*m->nbEBands]);*/
      }
   } while (++c<C);
   /*printf ("\n");*/
}
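In the float build, the fixed-point macros above (MAX32, MAC16_16, VSHR32, EXTRACT16, EXTEND32) collapse to plain arithmetic, so the function reduces to the square root of each band's energy plus a small EPSILON. A minimal float-only sketch of that reduction, with illustrative names (band_edges[] stands in for the M*eBands[] indexing and is not part of the CELT API):

#include <math.h>

/* Minimal float-only sketch of per-band amplitude (sqrt of energy).
   band_edges[] holds nbBands+1 bin indices; EPS keeps bands away from
   a zero norm, as EPSILON does in the original. */
static void band_amplitude_sketch(const float *X, const int *band_edges,
                                  int nbBands, float *bandE)
{
   const float EPS = 1e-15f;
   int i, j;
   for (i = 0; i < nbBands; i++)
   {
      float sum = 0.f;
      for (j = band_edges[i]; j < band_edges[i+1]; j++)
         sum += X[j]*X[j];
      bandE[i] = EPS + (float)sqrt(sum);
   }
}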
Example #2
void tonality_get_info(TonalityAnalysisState *tonal, AnalysisInfo *info_out, int len)
{
   int pos;
   int curr_lookahead;
   float psum;
   int i;

   pos = tonal->read_pos;
   curr_lookahead = tonal->write_pos-tonal->read_pos;
   if (curr_lookahead<0)
      curr_lookahead += DETECT_SIZE;

   /* On long frames, look at the second analysis window rather than the first. */
   if (len > tonal->Fs/50 && pos != tonal->write_pos)
   {
      pos++;
      if (pos==DETECT_SIZE)
         pos=0;
   }
   if (pos == tonal->write_pos)
      pos--;
   if (pos<0)
      pos = DETECT_SIZE-1;
   OPUS_COPY(info_out, &tonal->info[pos], 1);
   /* If possible, look ahead for a tone to compensate for the delay in the tone detector. */
   for (i=0;i<3;i++)
   {
      pos++;
      if (pos==DETECT_SIZE)
         pos = 0;
      if (pos == tonal->write_pos)
         break;
      info_out->tonality = MAX32(0, -.03f + MAX32(info_out->tonality, tonal->info[pos].tonality-.05f));
   }
   tonal->read_subframe += len/(tonal->Fs/400);
   while (tonal->read_subframe>=8)
   {
      tonal->read_subframe -= 8;
      tonal->read_pos++;
   }
   if (tonal->read_pos>=DETECT_SIZE)
      tonal->read_pos-=DETECT_SIZE;

   /* The -1 is to compensate for the delay in the features themselves. */
   curr_lookahead = IMAX(curr_lookahead-1, 0);

   psum=0;
   /* Summing the probability of transition patterns that involve music at
      time (DETECT_SIZE-curr_lookahead-1) */
   for (i=0;i<DETECT_SIZE-curr_lookahead;i++)
      psum += tonal->pmusic[i];
   for (;i<DETECT_SIZE;i++)
      psum += tonal->pspeech[i];
   psum = psum*tonal->music_confidence + (1-psum)*tonal->speech_confidence;
   /*printf("%f %f %f %f %f\n", psum, info_out->music_prob, info_out->vad_prob, info_out->activity_probability, info_out->tonality);*/

   info_out->music_prob = psum;
}
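read_pos and write_pos index a circular buffer of DETECT_SIZE analysis frames, which is why every increment above is followed by a wrap check and why a negative distance gets DETECT_SIZE added back. A small standalone sketch of that ring arithmetic, using an illustrative RING_SIZE constant rather than the real DETECT_SIZE:

#define RING_SIZE 100   /* illustrative stand-in for DETECT_SIZE */

/* Advance a circular-buffer index by one position and wrap. */
static int ring_advance(int pos)
{
   pos++;
   if (pos == RING_SIZE)
      pos = 0;
   return pos;
}

/* Number of frames buffered between the read and write positions. */
static int ring_lookahead(int read_pos, int write_pos)
{
   int d = write_pos - read_pos;
   if (d < 0)
      d += RING_SIZE;
   return d;
}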
Example #3
static __inline celt_int16_t FLOAT2INT16(float x)
{
   x = x*CELT_SIG_SCALE;
   x = MAX32(x, -32768);
   x = MIN32(x, 32767);
   return (celt_int16_t)float2int(x);
}
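The macros hide a scale, clamp and round-to-nearest conversion from the codec's float signal range to 16-bit PCM. A plain-C sketch under the assumption that CELT_SIG_SCALE is 32768 in the float build, with lrintf standing in for float2int's round-to-nearest:

#include <math.h>
#include <stdint.h>

/* Plain-C sketch: scale a +/-1.0 float sample to 16-bit PCM with
   saturation and round-to-nearest. The 32768 scale is an assumption
   about the float-build value of CELT_SIG_SCALE. */
static int16_t float_to_int16_sketch(float x)
{
   x *= 32768.f;
   if (x < -32768.f) x = -32768.f;
   if (x >  32767.f) x =  32767.f;
   return (int16_t)lrintf(x);   /* round to nearest, as float2int() does */
}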
Example #4
File: pitch.c Project: 93i/godot
static void find_best_pitch(opus_val32 *xcorr, opus_val16 *y, int len,
                            int max_pitch, int *best_pitch
#ifdef FIXED_POINT
                            , int yshift, opus_val32 maxcorr
#endif
                            )
{
   int i, j;
   opus_val32 Syy=1;
   opus_val16 best_num[2];
   opus_val32 best_den[2];
#ifdef FIXED_POINT
   int xshift;

   xshift = celt_ilog2(maxcorr)-14;
#endif

   best_num[0] = -1;
   best_num[1] = -1;
   best_den[0] = 0;
   best_den[1] = 0;
   best_pitch[0] = 0;
   best_pitch[1] = 1;
   for (j=0;j<len;j++)
      Syy = ADD32(Syy, SHR32(MULT16_16(y[j],y[j]), yshift));
   for (i=0;i<max_pitch;i++)
   {
      if (xcorr[i]>0)
      {
         opus_val16 num;
         opus_val32 xcorr16;
         xcorr16 = EXTRACT16(VSHR32(xcorr[i], xshift));
#ifndef FIXED_POINT
         /* Considering the range of xcorr16, this should avoid both underflows
            and overflows (inf) when squaring xcorr16 */
         xcorr16 *= 1e-12f;
#endif
         num = MULT16_16_Q15(xcorr16,xcorr16);
         if (MULT16_32_Q15(num,best_den[1]) > MULT16_32_Q15(best_num[1],Syy))
         {
            if (MULT16_32_Q15(num,best_den[0]) > MULT16_32_Q15(best_num[0],Syy))
            {
               best_num[1] = best_num[0];
               best_den[1] = best_den[0];
               best_pitch[1] = best_pitch[0];
               best_num[0] = num;
               best_den[0] = Syy;
               best_pitch[0] = i;
            } else {
               best_num[1] = num;
               best_den[1] = Syy;
               best_pitch[1] = i;
            }
         }
      }
      Syy += SHR32(MULT16_16(y[i+len],y[i+len]),yshift) - SHR32(MULT16_16(y[i],y[i]),yshift);
      Syy = MAX32(1, Syy);
   }
}
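The candidate test num/Syy > best_num/best_den is evaluated by cross-multiplication, so the loop tracks the two best normalized correlations without a single division per lag. A small standalone sketch of that idea in float (the helper names are illustrative, not part of the Opus code):

/* Divisionless comparison of two ratios a/b and c/d (b, d > 0):
   a/b > c/d  <=>  a*d > c*b. */
static int ratio_greater(float a, float b, float c, float d)
{
   return a*d > c*b;
}

/* Example: keep the indices of the best two ratios num[i]/den[i]. */
static void best_two(const float *num, const float *den, int n,
                     int *best0, int *best1)
{
   int i;
   *best0 = 0;
   *best1 = (n > 1) ? 1 : 0;
   for (i = 1; i < n; i++)
   {
      if (ratio_greater(num[i], den[i], num[*best0], den[*best0]))
      {
         *best1 = *best0;
         *best0 = i;
      } else if (ratio_greater(num[i], den[i], num[*best1], den[*best1]))
      {
         *best1 = i;
      }
   }
}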
Example #5
static int transient_analysis(celt_word32_t *in, int len, int C, int *transient_time, int *transient_shift)
{
   int c, i, n;
   celt_word32_t ratio;
   VARDECL(celt_word32_t, begin);
   SAVE_STACK;
   ALLOC(begin, len, celt_word32_t);
   for (i=0;i<len;i++)
      begin[i] = ABS32(SHR32(in[C*i],SIG_SHIFT));
   for (c=1;c<C;c++)
   {
      for (i=0;i<len;i++)
         begin[i] = MAX32(begin[i], ABS32(SHR32(in[C*i+c],SIG_SHIFT)));
   }
   for (i=1;i<len;i++)
      begin[i] = MAX32(begin[i-1],begin[i]);
   n = -1;
   for (i=8;i<len-8;i++)
   {
      if (begin[i] < MULT16_32_Q15(QCONST16(.2f,15),begin[len-1]))
         n=i;
   }
   if (n<32)
   {
      n = -1;
      ratio = 0;
   } else {
      ratio = DIV32(begin[len-1],1+begin[n-16]);
   }
   if (ratio < 0)
      ratio = 0;
   if (ratio > 1000)
      ratio = 1000;
   ratio *= ratio;
   
   if (ratio > 2048)
      *transient_shift = 3;
   else
      *transient_shift = 0;
   
   *transient_time = n;
   
   RESTORE_STACK;
   return ratio > 20;
}
Example #6
static inline opus_val16 SIG2WORD16(celt_sig x)
{
#ifdef FIXED_POINT
   x = PSHR32(x, SIG_SHIFT);
   x = MAX32(x, -32768);
   x = MIN32(x, 32767);
   return EXTRACT16(x);
#else
   return (opus_val16)x;
#endif
}
Example #7
static __inline celt_word16_t SIG2WORD16(celt_sig_t x)
{
#ifdef FIXED_POINT
   x = PSHR32(x, SIG_SHIFT);
   x = MAX32(x, -32768);
   x = MIN32(x, 32767);
   return EXTRACT16(x);
#else
   return (celt_word16_t)x;
#endif
}
Example #8
static void find_best_pitch(opus_val32 *xcorr, opus_val16 *y, int len,
                            int max_pitch, int *best_pitch
#ifdef FIXED_POINT
                            , int yshift, opus_val32 maxcorr
#endif
                            )
{
   int i, j;
   opus_val32 Syy=1;
   opus_val16 best_num[2];
   opus_val32 best_den[2];
#ifdef FIXED_POINT
   int xshift;

   xshift = celt_ilog2(maxcorr)-14;
#endif

   best_num[0] = -1;
   best_num[1] = -1;
   best_den[0] = 0;
   best_den[1] = 0;
   best_pitch[0] = 0;
   best_pitch[1] = 1;
   for (j=0;j<len;j++)
      Syy = MAC16_16(Syy, y[j],y[j]);
   for (i=0;i<max_pitch;i++)
   {
      if (xcorr[i]>0)
      {
         opus_val16 num;
         opus_val32 xcorr16;
         xcorr16 = EXTRACT16(VSHR32(xcorr[i], xshift));
         num = MULT16_16_Q15(xcorr16,xcorr16);
         if (MULT16_32_Q15(num,best_den[1]) > MULT16_32_Q15(best_num[1],Syy))
         {
            if (MULT16_32_Q15(num,best_den[0]) > MULT16_32_Q15(best_num[0],Syy))
            {
               best_num[1] = best_num[0];
               best_den[1] = best_den[0];
               best_pitch[1] = best_pitch[0];
               best_num[0] = num;
               best_den[0] = Syy;
               best_pitch[0] = i;
            } else {
               best_num[1] = num;
               best_den[1] = Syy;
               best_pitch[1] = i;
            }
         }
      }
      Syy += SHR32(MULT16_16(y[i+len],y[i+len]),yshift) - SHR32(MULT16_16(y[i],y[i]),yshift);
      Syy = MAX32(1, Syy);
   }
}
Example #9
/* Compute the amplitude (sqrt energy) in each of the bands */
void compute_band_energies(const CELTMode *m, const celt_sig *X, celt_ener *bank, int _C)
{
   int i, c, N;
   const celt_int16 *eBands = m->eBands;
   const int C = CHANNELS(_C);
   N = FRAMESIZE(m);
   for (c=0;c<C;c++)
   {
      for (i=0;i<m->nbEBands;i++)
      {
         int j;
         celt_word32 maxval=0;
         celt_word32 sum = 0;
         
         j=eBands[i]; do {
            maxval = MAX32(maxval, X[j+c*N]);
            maxval = MAX32(maxval, -X[j+c*N]);
         } while (++j<eBands[i+1]);
         
         if (maxval > 0)
         {
            int shift = celt_ilog2(maxval)-10;
            j=eBands[i]; do {
               sum = MAC16_16(sum, EXTRACT16(VSHR32(X[j+c*N],shift)),
                                   EXTRACT16(VSHR32(X[j+c*N],shift)));
            } while (++j<eBands[i+1]);
            /* We're adding one here to make damn sure we never end up with a pitch vector that's
               larger than unity norm */
            bank[i+c*m->nbEBands] = EPSILON+VSHR32(EXTEND32(celt_sqrt(sum)),-shift);
         } else {
            bank[i+c*m->nbEBands] = EPSILON;
         }
         /*printf ("%f ", bank[i+c*m->nbEBands]);*/
      }
   }
   /*printf ("\n");*/
}
Example #10
/** Finds the best quantized 3-tap pitch predictor by analysis by synthesis */
int pitch_search_3tap(
    spx_word16_t target[],                 /* Target vector */
    spx_word16_t* sw,
    spx_coef_t ak[],                     /* LPCs for this subframe */
    spx_coef_t awk1[],                   /* Weighted LPCs #1 for this subframe */
    spx_coef_t awk2[],                   /* Weighted LPCs #2 for this subframe */
    spx_sig_t exc[],                    /* Excitation */
    const void* par,
    int   start,                    /* Smallest pitch value allowed */
    int   end,                      /* Largest pitch value allowed */
    spx_word16_t pitch_coef,               /* Voicing (pitch) coefficient */
    int   p,                        /* Number of LPC coeffs */
    int   nsf,                      /* Number of samples in subframe */
    SpeexBits* bits,
    char* stack,
    spx_word16_t* exc2,
    spx_word16_t* r,
    int complexity,
    int cdbk_offset,
    int plc_tuning,
    spx_word32_t* cumul_gain
) {
    int i;
    int cdbk_index, pitch = 0, best_gain_index = 0;
    VARDECL(spx_sig_t * best_exc);
    VARDECL(spx_word16_t * new_target);
    VARDECL(spx_word16_t * best_target);
    int best_pitch = 0;
    spx_word32_t err, best_err = -1;
    int N;
    const ltp_params* params;
    const signed char* gain_cdbk;
    int   gain_cdbk_size;
    int scaledown = 0;

    VARDECL(int * nbest);

    params = (const ltp_params*) par;
    gain_cdbk_size = 1 << params->gain_bits;
    gain_cdbk = params->gain_cdbk + 4 * gain_cdbk_size * cdbk_offset;

    N = complexity;
    if (N > 10)
        N = 10;
    if (N < 1)
        N = 1;

    ALLOC(nbest, N, int);
    params = (const ltp_params*) par;

    if (end < start) {
        speex_bits_pack(bits, 0, params->pitch_bits);
        speex_bits_pack(bits, 0, params->gain_bits);
        SPEEX_MEMSET(exc, 0, nsf);
        return start;
    }

#ifdef FIXED_POINT
    /* Check if we need to scale everything down in the pitch search to avoid overflows */
    for (i = 0; i < nsf; i++) {
        if (ABS16(target[i]) > 16383) {
            scaledown = 1;
            break;
        }
    }
    for (i = -end; i < nsf; i++) {
        if (ABS16(exc2[i]) > 16383) {
            scaledown = 1;
            break;
        }
    }
#endif
    if (N > end - start + 1)
        N = end - start + 1;
    if (end != start)
        open_loop_nbest_pitch(sw, start, end, nsf, nbest, NULL, N, stack);
    else
        nbest[0] = start;

    ALLOC(best_exc, nsf, spx_sig_t);
    ALLOC(new_target, nsf, spx_word16_t);
    ALLOC(best_target, nsf, spx_word16_t);

    for (i = 0; i < N; i++) {
        pitch = nbest[i];
        SPEEX_MEMSET(exc, 0, nsf);
        err = pitch_gain_search_3tap(target, ak, awk1, awk2, exc, gain_cdbk, gain_cdbk_size, pitch, p, nsf,
                                     bits, stack, exc2, r, new_target, &cdbk_index, plc_tuning, *cumul_gain, scaledown);
        if (err < best_err || best_err < 0) {
            SPEEX_COPY(best_exc, exc, nsf);
            SPEEX_COPY(best_target, new_target, nsf);
            best_err = err;
            best_pitch = pitch;
            best_gain_index = cdbk_index;
        }
    }
    /*printf ("pitch: %d %d\n", best_pitch, best_gain_index);*/
    speex_bits_pack(bits, best_pitch - start, params->pitch_bits);
    speex_bits_pack(bits, best_gain_index, params->gain_bits);
#ifdef FIXED_POINT
    *cumul_gain = MULT16_32_Q13(SHL16(params->gain_cdbk[4 * best_gain_index + 3], 8), MAX32(1024, *cumul_gain));
#else
    *cumul_gain = 0.03125 * MAX32(1024, *cumul_gain) * params->gain_cdbk[4 * best_gain_index + 3];
#endif
    /*printf ("%f\n", cumul_gain);*/
    /*printf ("encode pitch: %d %d\n", best_pitch, best_gain_index);*/
    SPEEX_COPY(exc, best_exc, nsf);
    SPEEX_COPY(target, best_target, nsf);
#ifdef FIXED_POINT
    /* Scale target back up if needed */
    if (scaledown) {
        for (i = 0; i < nsf; i++)
            target[i] = SHL16(target[i], 1);
    }
#endif
    return pitch;
}
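The closed-loop part follows the usual analysis-by-synthesis pattern: for each candidate pitch from the open-loop pre-selection, synthesize and measure the error against the target, then keep the best. A stripped-down sketch of just that control flow, with a hypothetical try_candidate() callback standing in for pitch_gain_search_3tap():

/* Analysis-by-synthesis skeleton: try each candidate and keep the one
   with the lowest error. try_candidate() is a hypothetical stand-in for
   the per-candidate gain search and synthesis. */
static int pick_best_candidate(const int *candidates, int n,
                               float (*try_candidate)(int cand, void *ctx),
                               void *ctx)
{
   int i, best = candidates[0];
   float err, best_err = -1.f;
   for (i = 0; i < n; i++)
   {
      err = try_candidate(candidates[i], ctx);
      if (err < best_err || best_err < 0)
      {
         best_err = err;
         best = candidates[i];
      }
   }
   return best;
}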
Example #11
void tonality_analysis(TonalityAnalysisState *tonal, AnalysisInfo *info_out, const CELTMode *celt_mode, const void *x, int len, int offset, int c1, int c2, int C, int lsb_depth, downmix_func downmix)
{
    int i, b;
    const kiss_fft_state *kfft;
    VARDECL(kiss_fft_cpx, in);
    VARDECL(kiss_fft_cpx, out);
    int N = 480, N2=240;
    float * OPUS_RESTRICT A = tonal->angle;
    float * OPUS_RESTRICT dA = tonal->d_angle;
    float * OPUS_RESTRICT d2A = tonal->d2_angle;
    VARDECL(float, tonality);
    VARDECL(float, noisiness);
    float band_tonality[NB_TBANDS];
    float logE[NB_TBANDS];
    float BFCC[8];
    float features[25];
    float frame_tonality;
    float max_frame_tonality;
    /*float tw_sum=0;*/
    float frame_noisiness;
    const float pi4 = (float)(M_PI*M_PI*M_PI*M_PI);
    float slope=0;
    float frame_stationarity;
    float relativeE;
    float frame_probs[2];
    float alpha, alphaE, alphaE2;
    float frame_loudness;
    float bandwidth_mask;
    int bandwidth=0;
    float maxE = 0;
    float noise_floor;
    int remaining;
    AnalysisInfo *info;
    SAVE_STACK;

    tonal->last_transition++;
    alpha = 1.f/IMIN(20, 1+tonal->count);
    alphaE = 1.f/IMIN(50, 1+tonal->count);
    alphaE2 = 1.f/IMIN(1000, 1+tonal->count);

    if (tonal->count<4)
       tonal->music_prob = .5;
    kfft = celt_mode->mdct.kfft[0];
    if (tonal->count==0)
       tonal->mem_fill = 240;
    downmix(x, &tonal->inmem[tonal->mem_fill], IMIN(len, ANALYSIS_BUF_SIZE-tonal->mem_fill), offset, c1, c2, C);
    if (tonal->mem_fill+len < ANALYSIS_BUF_SIZE)
    {
       tonal->mem_fill += len;
       /* Don't have enough to update the analysis */
       RESTORE_STACK;
       return;
    }
    info = &tonal->info[tonal->write_pos++];
    if (tonal->write_pos>=DETECT_SIZE)
       tonal->write_pos-=DETECT_SIZE;

    ALLOC(in, 480, kiss_fft_cpx);
    ALLOC(out, 480, kiss_fft_cpx);
    ALLOC(tonality, 240, float);
    ALLOC(noisiness, 240, float);
    for (i=0;i<N2;i++)
    {
       float w = analysis_window[i];
       in[i].r = (kiss_fft_scalar)(w*tonal->inmem[i]);
       in[i].i = (kiss_fft_scalar)(w*tonal->inmem[N2+i]);
       in[N-i-1].r = (kiss_fft_scalar)(w*tonal->inmem[N-i-1]);
       in[N-i-1].i = (kiss_fft_scalar)(w*tonal->inmem[N+N2-i-1]);
    }
    OPUS_MOVE(tonal->inmem, tonal->inmem+ANALYSIS_BUF_SIZE-240, 240);
    remaining = len - (ANALYSIS_BUF_SIZE-tonal->mem_fill);
    downmix(x, &tonal->inmem[240], remaining, offset+ANALYSIS_BUF_SIZE-tonal->mem_fill, c1, c2, C);
    tonal->mem_fill = 240 + remaining;
    opus_fft(kfft, in, out);

    for (i=1;i<N2;i++)
    {
       float X1r, X2r, X1i, X2i;
       float angle, d_angle, d2_angle;
       float angle2, d_angle2, d2_angle2;
       float mod1, mod2, avg_mod;
       X1r = (float)out[i].r+out[N-i].r;
       X1i = (float)out[i].i-out[N-i].i;
       X2r = (float)out[i].i+out[N-i].i;
       X2i = (float)out[N-i].r-out[i].r;

       angle = (float)(.5f/M_PI)*fast_atan2f(X1i, X1r);
       d_angle = angle - A[i];
       d2_angle = d_angle - dA[i];

       angle2 = (float)(.5f/M_PI)*fast_atan2f(X2i, X2r);
       d_angle2 = angle2 - angle;
       d2_angle2 = d_angle2 - d_angle;

       mod1 = d2_angle - (float)floor(.5+d2_angle);
       noisiness[i] = ABS16(mod1);
       mod1 *= mod1;
       mod1 *= mod1;

       mod2 = d2_angle2 - (float)floor(.5+d2_angle2);
       noisiness[i] += ABS16(mod2);
       mod2 *= mod2;
       mod2 *= mod2;

       avg_mod = .25f*(d2A[i]+2.f*mod1+mod2);
       tonality[i] = 1.f/(1.f+40.f*16.f*pi4*avg_mod)-.015f;

       A[i] = angle2;
       dA[i] = d_angle2;
       d2A[i] = mod2;
    }

    frame_tonality = 0;
    max_frame_tonality = 0;
    /*tw_sum = 0;*/
    info->activity = 0;
    frame_noisiness = 0;
    frame_stationarity = 0;
    if (!tonal->count)
    {
       for (b=0;b<NB_TBANDS;b++)
       {
          tonal->lowE[b] = 1e10;
          tonal->highE[b] = -1e10;
       }
    }
    relativeE = 0;
    frame_loudness = 0;
    for (b=0;b<NB_TBANDS;b++)
    {
       float E=0, tE=0, nE=0;
       float L1, L2;
       float stationarity;
       for (i=tbands[b];i<tbands[b+1];i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
#ifdef FIXED_POINT
          /* FIXME: It's probably best to change the BFCC filter initial state instead */
          binE *= 5.55e-17f;
#endif
          E += binE;
          tE += binE*tonality[i];
          nE += binE*2.f*(.5f-noisiness[i]);
       }
       tonal->E[tonal->E_count][b] = E;
       frame_noisiness += nE/(1e-15f+E);

       frame_loudness += (float)sqrt(E+1e-10f);
       logE[b] = (float)log(E+1e-10f);
       tonal->lowE[b] = MIN32(logE[b], tonal->lowE[b]+.01f);
       tonal->highE[b] = MAX32(logE[b], tonal->highE[b]-.1f);
       if (tonal->highE[b] < tonal->lowE[b]+1.f)
       {
          tonal->highE[b]+=.5f;
          tonal->lowE[b]-=.5f;
       }
       relativeE += (logE[b]-tonal->lowE[b])/(1e-15f+tonal->highE[b]-tonal->lowE[b]);

       L1=L2=0;
       for (i=0;i<NB_FRAMES;i++)
       {
          L1 += (float)sqrt(tonal->E[i][b]);
          L2 += tonal->E[i][b];
       }

       stationarity = MIN16(0.99f,L1/(float)sqrt(1e-15+NB_FRAMES*L2));
       stationarity *= stationarity;
       stationarity *= stationarity;
       frame_stationarity += stationarity;
       /*band_tonality[b] = tE/(1e-15+E)*/;
       band_tonality[b] = MAX16(tE/(1e-15f+E), stationarity*tonal->prev_band_tonality[b]);
#if 0
       if (b>=NB_TONAL_SKIP_BANDS)
       {
          frame_tonality += tweight[b]*band_tonality[b];
          tw_sum += tweight[b];
       }
#else
       frame_tonality += band_tonality[b];
       if (b>=NB_TBANDS-NB_TONAL_SKIP_BANDS)
          frame_tonality -= band_tonality[b-NB_TBANDS+NB_TONAL_SKIP_BANDS];
#endif
       max_frame_tonality = MAX16(max_frame_tonality, (1.f+.03f*(b-NB_TBANDS))*frame_tonality);
       slope += band_tonality[b]*(b-8);
       /*printf("%f %f ", band_tonality[b], stationarity);*/
       tonal->prev_band_tonality[b] = band_tonality[b];
    }

    bandwidth_mask = 0;
    bandwidth = 0;
    maxE = 0;
    noise_floor = 5.7e-4f/(1<<(IMAX(0,lsb_depth-8)));
#ifdef FIXED_POINT
    noise_floor *= 1<<(15+SIG_SHIFT);
#endif
    noise_floor *= noise_floor;
    for (b=0;b<NB_TOT_BANDS;b++)
    {
       float E=0;
       int band_start, band_end;
       /* Keep a margin of 300 Hz for aliasing */
       band_start = extra_bands[b];
       band_end = extra_bands[b+1];
       for (i=band_start;i<band_end;i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
          E += binE;
       }
       maxE = MAX32(maxE, E);
       tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E);
       E = MAX32(E, tonal->meanE[b]);
       /* Use a simple follower with 13 dB/Bark slope for spreading function */
       bandwidth_mask = MAX32(.05f*bandwidth_mask, E);
       /* Consider the band "active" only if all these conditions are met:
          1) less than 10 dB below the simple follower
          2) less than 90 dB below the peak band (maximal masking possible considering
             both the ATH and the loudness-dependent slope of the spreading function)
          3) above the PCM quantization noise floor
       */
       if (E>.1*bandwidth_mask && E*1e9f > maxE && E > noise_floor*(band_end-band_start))
          bandwidth = b;
    }
    if (tonal->count<=2)
       bandwidth = 20;
    frame_loudness = 20*(float)log10(frame_loudness);
    tonal->Etracker = MAX32(tonal->Etracker-.03f, frame_loudness);
    tonal->lowECount *= (1-alphaE);
    if (frame_loudness < tonal->Etracker-30)
       tonal->lowECount += alphaE;

    for (i=0;i<8;i++)
    {
       float sum=0;
       for (b=0;b<16;b++)
          sum += dct_table[i*16+b]*logE[b];
       BFCC[i] = sum;
    }

    frame_stationarity /= NB_TBANDS;
    relativeE /= NB_TBANDS;
    if (tonal->count<10)
       relativeE = .5;
    frame_noisiness /= NB_TBANDS;
#if 1
    info->activity = frame_noisiness + (1-frame_noisiness)*relativeE;
#else
    info->activity = .5*(1+frame_noisiness-frame_stationarity);
#endif
    frame_tonality = (max_frame_tonality/(NB_TBANDS-NB_TONAL_SKIP_BANDS));
    frame_tonality = MAX16(frame_tonality, tonal->prev_tonality*.8f);
    tonal->prev_tonality = frame_tonality;

    slope /= 8*8;
    info->tonality_slope = slope;

    tonal->E_count = (tonal->E_count+1)%NB_FRAMES;
    tonal->count++;
    info->tonality = frame_tonality;

    for (i=0;i<4;i++)
       features[i] = -0.12299f*(BFCC[i]+tonal->mem[i+24]) + 0.49195f*(tonal->mem[i]+tonal->mem[i+16]) + 0.69693f*tonal->mem[i+8] - 1.4349f*tonal->cmean[i];

    for (i=0;i<4;i++)
       tonal->cmean[i] = (1-alpha)*tonal->cmean[i] + alpha*BFCC[i];

    for (i=0;i<4;i++)
        features[4+i] = 0.63246f*(BFCC[i]-tonal->mem[i+24]) + 0.31623f*(tonal->mem[i]-tonal->mem[i+16]);
    for (i=0;i<3;i++)
        features[8+i] = 0.53452f*(BFCC[i]+tonal->mem[i+24]) - 0.26726f*(tonal->mem[i]+tonal->mem[i+16]) -0.53452f*tonal->mem[i+8];

    if (tonal->count > 5)
    {
       for (i=0;i<9;i++)
          tonal->std[i] = (1-alpha)*tonal->std[i] + alpha*features[i]*features[i];
    }

    for (i=0;i<8;i++)
    {
       tonal->mem[i+24] = tonal->mem[i+16];
       tonal->mem[i+16] = tonal->mem[i+8];
       tonal->mem[i+8] = tonal->mem[i];
       tonal->mem[i] = BFCC[i];
    }
    for (i=0;i<9;i++)
       features[11+i] = (float)sqrt(tonal->std[i]);
    features[20] = info->tonality;
    features[21] = info->activity;
    features[22] = frame_stationarity;
    features[23] = info->tonality_slope;
    features[24] = tonal->lowECount;

#ifndef DISABLE_FLOAT_API
    mlp_process(&net, features, frame_probs);
    frame_probs[0] = .5f*(frame_probs[0]+1);
    /* Curve fitting between the MLP probability and the actual probability */
    frame_probs[0] = .01f + 1.21f*frame_probs[0]*frame_probs[0] - .23f*(float)pow(frame_probs[0], 10);
    /* Probability of active audio (as opposed to silence) */
    frame_probs[1] = .5f*frame_probs[1]+.5f;
    /* Consider that silence has a 50-50 probability. */
    frame_probs[0] = frame_probs[1]*frame_probs[0] + (1-frame_probs[1])*.5f;

    /*printf("%f %f ", frame_probs[0], frame_probs[1]);*/
    {
       /* Probability of state transition */
       float tau;
       /* Represents independence of the MLP probabilities, where
          beta=1 means fully independent. */
       float beta;
       /* Denormalized probability of speech (p0) and music (p1) after update */
       float p0, p1;
       /* Probabilities for "all speech" and "all music" */
       float s0, m0;
       /* Probability sum for renormalisation */
       float psum;
       /* Instantaneous probability of speech and music, with beta pre-applied. */
       float speech0;
       float music0;

       /* One transition every 3 minutes of active audio */
       tau = .00005f*frame_probs[1];
       beta = .05f;
       if (1) {
          /* Adapt beta based on how "unexpected" the new prob is */
          float p, q;
          p = MAX16(.05f,MIN16(.95f,frame_probs[0]));
          q = MAX16(.05f,MIN16(.95f,tonal->music_prob));
          beta = .01f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p));
       }
       /* p0 and p1 are the probabilities of speech and music at this frame
          using only information from previous frame and applying the
          state transition model */
       p0 = (1-tonal->music_prob)*(1-tau) +    tonal->music_prob *tau;
       p1 =    tonal->music_prob *(1-tau) + (1-tonal->music_prob)*tau;
       /* We apply the current probability with exponent beta to work around
          the fact that the probability estimates aren't independent. */
       p0 *= (float)pow(1-frame_probs[0], beta);
       p1 *= (float)pow(frame_probs[0], beta);
       /* Normalise the probabilities to get the Markov probability of music. */
       tonal->music_prob = p1/(p0+p1);
       info->music_prob = tonal->music_prob;

       /* This chunk of code deals with delayed decision. */
       psum=1e-20f;
       /* Instantaneous probability of speech and music, with beta pre-applied. */
       speech0 = (float)pow(1-frame_probs[0], beta);
       music0  = (float)pow(frame_probs[0], beta);
       if (tonal->count==1)
       {
          tonal->pspeech[0]=.5;
          tonal->pmusic [0]=.5;
       }
       /* Updated probability of having only speech (s0) or only music (m0),
          before considering the new observation. */
       s0 = tonal->pspeech[0] + tonal->pspeech[1];
       m0 = tonal->pmusic [0] + tonal->pmusic [1];
       /* Updates s0 and m0 with instantaneous probability. */
       tonal->pspeech[0] = s0*(1-tau)*speech0;
       tonal->pmusic [0] = m0*(1-tau)*music0;
       /* Propagate the transition probabilities */
       for (i=1;i<DETECT_SIZE-1;i++)
       {
          tonal->pspeech[i] = tonal->pspeech[i+1]*speech0;
          tonal->pmusic [i] = tonal->pmusic [i+1]*music0;
       }
       /* Probability that the latest frame is speech, when all the previous ones were music. */
       tonal->pspeech[DETECT_SIZE-1] = m0*tau*speech0;
       /* Probability that the latest frame is music, when all the previous ones were speech. */
       tonal->pmusic [DETECT_SIZE-1] = s0*tau*music0;

       /* Renormalise probabilities to 1 */
       for (i=0;i<DETECT_SIZE;i++)
          psum += tonal->pspeech[i] + tonal->pmusic[i];
       psum = 1.f/psum;
       for (i=0;i<DETECT_SIZE;i++)
       {
          tonal->pspeech[i] *= psum;
          tonal->pmusic [i] *= psum;
       }
       psum = tonal->pmusic[0];
       for (i=1;i<DETECT_SIZE;i++)
          psum += tonal->pspeech[i];

       /* Estimate our confidence in the speech/music decisions */
       if (frame_probs[1]>.75)
       {
          if (tonal->music_prob>.9)
          {
             float adapt;
             adapt = 1.f/(++tonal->music_confidence_count);
             tonal->music_confidence_count = IMIN(tonal->music_confidence_count, 500);
             tonal->music_confidence += adapt*MAX16(-.2f,frame_probs[0]-tonal->music_confidence);
          }
          if (tonal->music_prob<.1)
          {
             float adapt;
             adapt = 1.f/(++tonal->speech_confidence_count);
             tonal->speech_confidence_count = IMIN(tonal->speech_confidence_count, 500);
             tonal->speech_confidence += adapt*MIN16(.2f,frame_probs[0]-tonal->speech_confidence);
          }
       } else {
          if (tonal->music_confidence_count==0)
             tonal->music_confidence = .9f;
          if (tonal->speech_confidence_count==0)
             tonal->speech_confidence = .1f;
       }
    }
    if (tonal->last_music != (tonal->music_prob>.5f))
       tonal->last_transition=0;
    tonal->last_music = tonal->music_prob>.5f;
#else
    info->music_prob = 0;
#endif
    /*for (i=0;i<25;i++)
       printf("%f ", features[i]);
    printf("\n");*/

    info->bandwidth = bandwidth;
    /*printf("%d %d\n", info->bandwidth, info->opus_bandwidth);*/
    info->noisiness = frame_noisiness;
    info->valid = 1;
    if (info_out!=NULL)
       OPUS_COPY(info_out, info, 1);
    RESTORE_STACK;
}
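The speech/music decision above is smoothed with a two-state Markov model: the previous probability is pushed through a small transition probability tau, the new (non-independent) observation is applied with exponent beta, and the result is renormalized. A compact float sketch of just that update step, with tau and beta passed in rather than the tuned values used here:

#include <math.h>

/* Two-state Markov update: prev_music is P(music) from the last frame,
   obs_music the new instantaneous estimate, tau the transition
   probability and beta the "independence" exponent. Returns the
   updated P(music). */
static float markov_music_update(float prev_music, float obs_music,
                                 float tau, float beta)
{
   /* Propagate last frame's probability through the transition model. */
   float p_speech = (1.f - prev_music)*(1.f - tau) + prev_music*tau;
   float p_music  = prev_music*(1.f - tau) + (1.f - prev_music)*tau;
   /* Apply the new observation with exponent beta, since the per-frame
      estimates are not independent. */
   p_speech *= (float)pow(1.f - obs_music, beta);
   p_music  *= (float)pow(obs_music, beta);
   /* Renormalize to get the posterior probability of music. */
   return p_music/(p_speech + p_music);
}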
Example #12
static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt_mode, const void *x, int len, int offset, int c1, int c2, int C, int lsb_depth, downmix_func downmix)
{
    int i, b;
    const kiss_fft_state *kfft;
    VARDECL(kiss_fft_cpx, in);
    VARDECL(kiss_fft_cpx, out);
    int N = 480, N2=240;
    float * OPUS_RESTRICT A = tonal->angle;
    float * OPUS_RESTRICT dA = tonal->d_angle;
    float * OPUS_RESTRICT d2A = tonal->d2_angle;
    VARDECL(float, tonality);
    VARDECL(float, noisiness);
    float band_tonality[NB_TBANDS];
    float logE[NB_TBANDS];
    float BFCC[8];
    float features[25];
    float frame_tonality;
    float max_frame_tonality;
    /*float tw_sum=0;*/
    float frame_noisiness;
    const float pi4 = (float)(M_PI*M_PI*M_PI*M_PI);
    float slope=0;
    float frame_stationarity;
    float relativeE;
    float frame_probs[2];
    float alpha, alphaE, alphaE2;
    float frame_loudness;
    float bandwidth_mask;
    int bandwidth=0;
    float maxE = 0;
    float noise_floor;
    int remaining;
    AnalysisInfo *info;
    float hp_ener;
    float tonality2[240];
    float midE[8];
    float spec_variability=0;
    float band_log2[NB_TBANDS+1];
    float leakage_from[NB_TBANDS+1];
    float leakage_to[NB_TBANDS+1];
    SAVE_STACK;

    alpha = 1.f/IMIN(10, 1+tonal->count);
    alphaE = 1.f/IMIN(25, 1+tonal->count);
    alphaE2 = 1.f/IMIN(500, 1+tonal->count);

    if (tonal->Fs == 48000)
    {
       /* len and offset are now at 24 kHz. */
       len/= 2;
       offset /= 2;
    } else if (tonal->Fs == 16000) {
       len = 3*len/2;
       offset = 3*offset/2;
    }

    if (tonal->count<4) {
       if (tonal->application == OPUS_APPLICATION_VOIP)
          tonal->music_prob = .1f;
       else
          tonal->music_prob = .625f;
    }
    kfft = celt_mode->mdct.kfft[0];
    if (tonal->count==0)
       tonal->mem_fill = 240;
    tonal->hp_ener_accum += (float)downmix_and_resample(downmix, x,
          &tonal->inmem[tonal->mem_fill], tonal->downmix_state,
          IMIN(len, ANALYSIS_BUF_SIZE-tonal->mem_fill), offset, c1, c2, C, tonal->Fs);
    if (tonal->mem_fill+len < ANALYSIS_BUF_SIZE)
    {
       tonal->mem_fill += len;
       /* Don't have enough to update the analysis */
       RESTORE_STACK;
       return;
    }
    hp_ener = tonal->hp_ener_accum;
    info = &tonal->info[tonal->write_pos++];
    if (tonal->write_pos>=DETECT_SIZE)
       tonal->write_pos-=DETECT_SIZE;

    ALLOC(in, 480, kiss_fft_cpx);
    ALLOC(out, 480, kiss_fft_cpx);
    ALLOC(tonality, 240, float);
    ALLOC(noisiness, 240, float);
    for (i=0;i<N2;i++)
    {
       float w = analysis_window[i];
       in[i].r = (kiss_fft_scalar)(w*tonal->inmem[i]);
       in[i].i = (kiss_fft_scalar)(w*tonal->inmem[N2+i]);
       in[N-i-1].r = (kiss_fft_scalar)(w*tonal->inmem[N-i-1]);
       in[N-i-1].i = (kiss_fft_scalar)(w*tonal->inmem[N+N2-i-1]);
    }
    OPUS_MOVE(tonal->inmem, tonal->inmem+ANALYSIS_BUF_SIZE-240, 240);
    remaining = len - (ANALYSIS_BUF_SIZE-tonal->mem_fill);
    tonal->hp_ener_accum = (float)downmix_and_resample(downmix, x,
          &tonal->inmem[240], tonal->downmix_state, remaining,
          offset+ANALYSIS_BUF_SIZE-tonal->mem_fill, c1, c2, C, tonal->Fs);
    tonal->mem_fill = 240 + remaining;
    opus_fft(kfft, in, out, tonal->arch);
#ifndef FIXED_POINT
    /* If there's any NaN on the input, the entire output will be NaN, so we only need to check one value. */
    if (celt_isnan(out[0].r))
    {
       info->valid = 0;
       RESTORE_STACK;
       return;
    }
#endif

    for (i=1;i<N2;i++)
    {
       float X1r, X2r, X1i, X2i;
       float angle, d_angle, d2_angle;
       float angle2, d_angle2, d2_angle2;
       float mod1, mod2, avg_mod;
       X1r = (float)out[i].r+out[N-i].r;
       X1i = (float)out[i].i-out[N-i].i;
       X2r = (float)out[i].i+out[N-i].i;
       X2i = (float)out[N-i].r-out[i].r;

       angle = (float)(.5f/M_PI)*fast_atan2f(X1i, X1r);
       d_angle = angle - A[i];
       d2_angle = d_angle - dA[i];

       angle2 = (float)(.5f/M_PI)*fast_atan2f(X2i, X2r);
       d_angle2 = angle2 - angle;
       d2_angle2 = d_angle2 - d_angle;

       mod1 = d2_angle - (float)float2int(d2_angle);
       noisiness[i] = ABS16(mod1);
       mod1 *= mod1;
       mod1 *= mod1;

       mod2 = d2_angle2 - (float)float2int(d2_angle2);
       noisiness[i] += ABS16(mod2);
       mod2 *= mod2;
       mod2 *= mod2;

       avg_mod = .25f*(d2A[i]+mod1+2*mod2);
       /* This introduces an extra delay of 2 frames in the detection. */
       tonality[i] = 1.f/(1.f+40.f*16.f*pi4*avg_mod)-.015f;
       /* No delay on this detection, but it's less reliable. */
       tonality2[i] = 1.f/(1.f+40.f*16.f*pi4*mod2)-.015f;

       A[i] = angle2;
       dA[i] = d_angle2;
       d2A[i] = mod2;
    }
    for (i=2;i<N2-1;i++)
    {
       float tt = MIN32(tonality2[i], MAX32(tonality2[i-1], tonality2[i+1]));
       tonality[i] = .9f*MAX32(tonality[i], tt-.1f);
    }
    frame_tonality = 0;
    max_frame_tonality = 0;
    /*tw_sum = 0;*/
    info->activity = 0;
    frame_noisiness = 0;
    frame_stationarity = 0;
    if (!tonal->count)
    {
       for (b=0;b<NB_TBANDS;b++)
       {
          tonal->lowE[b] = 1e10;
          tonal->highE[b] = -1e10;
       }
    }
    relativeE = 0;
    frame_loudness = 0;
    /* The energy of the very first band is special because of DC. */
    {
       float E = 0;
       float X1r, X2r;
       X1r = 2*(float)out[0].r;
       X2r = 2*(float)out[0].i;
       E = X1r*X1r + X2r*X2r;
       for (i=1;i<4;i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
          E += binE;
       }
       E = SCALE_ENER(E);
       band_log2[0] = .5f*1.442695f*(float)log(E+1e-10f);
    }
    for (b=0;b<NB_TBANDS;b++)
    {
       float E=0, tE=0, nE=0;
       float L1, L2;
       float stationarity;
       for (i=tbands[b];i<tbands[b+1];i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
          binE = SCALE_ENER(binE);
          E += binE;
          tE += binE*MAX32(0, tonality[i]);
          nE += binE*2.f*(.5f-noisiness[i]);
       }
#ifndef FIXED_POINT
       /* Check for extreme band energies that could cause NaNs later. */
       if (!(E<1e9f) || celt_isnan(E))
       {
          info->valid = 0;
          RESTORE_STACK;
          return;
       }
#endif

       tonal->E[tonal->E_count][b] = E;
       frame_noisiness += nE/(1e-15f+E);

       frame_loudness += (float)sqrt(E+1e-10f);
       logE[b] = (float)log(E+1e-10f);
       band_log2[b+1] = .5f*1.442695f*(float)log(E+1e-10f);
       tonal->logE[tonal->E_count][b] = logE[b];
       if (tonal->count==0)
          tonal->highE[b] = tonal->lowE[b] = logE[b];
       if (tonal->highE[b] > tonal->lowE[b] + 7.5)
       {
          if (tonal->highE[b] - logE[b] > logE[b] - tonal->lowE[b])
             tonal->highE[b] -= .01f;
          else
             tonal->lowE[b] += .01f;
       }
       if (logE[b] > tonal->highE[b])
       {
          tonal->highE[b] = logE[b];
          tonal->lowE[b] = MAX32(tonal->highE[b]-15, tonal->lowE[b]);
       } else if (logE[b] < tonal->lowE[b])
       {
          tonal->lowE[b] = logE[b];
          tonal->highE[b] = MIN32(tonal->lowE[b]+15, tonal->highE[b]);
       }
       relativeE += (logE[b]-tonal->lowE[b])/(1e-15f + (tonal->highE[b]-tonal->lowE[b]));

       L1=L2=0;
       for (i=0;i<NB_FRAMES;i++)
       {
          L1 += (float)sqrt(tonal->E[i][b]);
          L2 += tonal->E[i][b];
       }

       stationarity = MIN16(0.99f,L1/(float)sqrt(1e-15+NB_FRAMES*L2));
       stationarity *= stationarity;
       stationarity *= stationarity;
       frame_stationarity += stationarity;
       /*band_tonality[b] = tE/(1e-15+E)*/;
       band_tonality[b] = MAX16(tE/(1e-15f+E), stationarity*tonal->prev_band_tonality[b]);
#if 0
       if (b>=NB_TONAL_SKIP_BANDS)
       {
          frame_tonality += tweight[b]*band_tonality[b];
          tw_sum += tweight[b];
       }
#else
       frame_tonality += band_tonality[b];
       if (b>=NB_TBANDS-NB_TONAL_SKIP_BANDS)
          frame_tonality -= band_tonality[b-NB_TBANDS+NB_TONAL_SKIP_BANDS];
#endif
       max_frame_tonality = MAX16(max_frame_tonality, (1.f+.03f*(b-NB_TBANDS))*frame_tonality);
       slope += band_tonality[b]*(b-8);
       /*printf("%f %f ", band_tonality[b], stationarity);*/
       tonal->prev_band_tonality[b] = band_tonality[b];
    }

    leakage_from[0] = band_log2[0];
    leakage_to[0] = band_log2[0] - LEAKAGE_OFFSET;
    for (b=1;b<NB_TBANDS+1;b++)
    {
       float leak_slope = LEAKAGE_SLOPE*(tbands[b]-tbands[b-1])/4;
       leakage_from[b] = MIN16(leakage_from[b-1]+leak_slope, band_log2[b]);
       leakage_to[b] = MAX16(leakage_to[b-1]-leak_slope, band_log2[b]-LEAKAGE_OFFSET);
    }
    for (b=NB_TBANDS-2;b>=0;b--)
    {
       float leak_slope = LEAKAGE_SLOPE*(tbands[b+1]-tbands[b])/4;
       leakage_from[b] = MIN16(leakage_from[b+1]+leak_slope, leakage_from[b]);
       leakage_to[b] = MAX16(leakage_to[b+1]-leak_slope, leakage_to[b]);
    }
    celt_assert(NB_TBANDS+1 <= LEAK_BANDS);
    for (b=0;b<NB_TBANDS+1;b++)
    {
       /* leak_boost[] is made up of two terms. The first, based on leakage_to[],
          represents the boost needed to overcome the amount of analysis leakage
          caused in a weaker band b by louder neighbouring bands.
          The second, based on leakage_from[], applies to a loud band b for
          which the quantization noise causes synthesis leakage to the weaker
          neighbouring bands. */
       float boost = MAX16(0, leakage_to[b] - band_log2[b]) +
             MAX16(0, band_log2[b] - (leakage_from[b]+LEAKAGE_OFFSET));
       info->leak_boost[b] = IMIN(255, (int)floor(.5 + 64.f*boost));
    }
    for (;b<LEAK_BANDS;b++) info->leak_boost[b] = 0;

    for (i=0;i<NB_FRAMES;i++)
    {
       int j;
       float mindist = 1e15f;
       for (j=0;j<NB_FRAMES;j++)
       {
          int k;
          float dist=0;
          for (k=0;k<NB_TBANDS;k++)
          {
             float tmp;
             tmp = tonal->logE[i][k] - tonal->logE[j][k];
             dist += tmp*tmp;
          }
          if (j!=i)
             mindist = MIN32(mindist, dist);
       }
       spec_variability += mindist;
    }
    spec_variability = (float)sqrt(spec_variability/NB_FRAMES/NB_TBANDS);
    bandwidth_mask = 0;
    bandwidth = 0;
    maxE = 0;
    noise_floor = 5.7e-4f/(1<<(IMAX(0,lsb_depth-8)));
    noise_floor *= noise_floor;
    for (b=0;b<NB_TBANDS;b++)
    {
       float E=0;
       int band_start, band_end;
       /* Keep a margin of 300 Hz for aliasing */
       band_start = tbands[b];
       band_end = tbands[b+1];
       for (i=band_start;i<band_end;i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
          E += binE;
       }
       E = SCALE_ENER(E);
       maxE = MAX32(maxE, E);
       tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E);
       E = MAX32(E, tonal->meanE[b]);
       /* Use a simple follower with 13 dB/Bark slope for spreading function */
       bandwidth_mask = MAX32(.05f*bandwidth_mask, E);
       /* Consider the band "active" only if all these conditions are met:
          1) less than 10 dB below the simple follower
          2) less than 90 dB below the peak band (maximal masking possible considering
             both the ATH and the loudness-dependent slope of the spreading function)
          3) above the PCM quantization noise floor
          We use b+1 because the first CELT band isn't included in tbands[]
       */
       if (E>.1*bandwidth_mask && E*1e9f > maxE && E > noise_floor*(band_end-band_start))
          bandwidth = b+1;
    }
    /* Special case for the last two bands, for which we don't have spectrum but only
       the energy above 12 kHz. */
    if (tonal->Fs == 48000) {
       float ratio;
       float E = hp_ener*(1.f/(240*240));
       ratio = tonal->prev_bandwidth==20 ? 0.03f : 0.07f;
#ifdef FIXED_POINT
       /* silk_resampler_down2_hp() shifted right by an extra 8 bits. */
       E *= 256.f*(1.f/Q15ONE)*(1.f/Q15ONE);
#endif
       maxE = MAX32(maxE, E);
       tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E);
       E = MAX32(E, tonal->meanE[b]);
       /* Use a simple follower with 13 dB/Bark slope for spreading function */
       bandwidth_mask = MAX32(.05f*bandwidth_mask, E);
       if (E>ratio*bandwidth_mask && E*1e9f > maxE && E > noise_floor*160)
          bandwidth = 20;
       /* This detector is unreliable, so if the bandwidth is close to SWB, assume it's FB. */
       if (bandwidth >= 17)
          bandwidth = 20;
    }
    if (tonal->count<=2)
       bandwidth = 20;
    frame_loudness = 20*(float)log10(frame_loudness);
    tonal->Etracker = MAX32(tonal->Etracker-.003f, frame_loudness);
    tonal->lowECount *= (1-alphaE);
    if (frame_loudness < tonal->Etracker-30)
       tonal->lowECount += alphaE;

    for (i=0;i<8;i++)
    {
       float sum=0;
       for (b=0;b<16;b++)
          sum += dct_table[i*16+b]*logE[b];
       BFCC[i] = sum;
    }
    for (i=0;i<8;i++)
    {
       float sum=0;
       for (b=0;b<16;b++)
          sum += dct_table[i*16+b]*.5f*(tonal->highE[b]+tonal->lowE[b]);
       midE[i] = sum;
    }

    frame_stationarity /= NB_TBANDS;
    relativeE /= NB_TBANDS;
    if (tonal->count<10)
       relativeE = .5f;
    frame_noisiness /= NB_TBANDS;
#if 1
    info->activity = frame_noisiness + (1-frame_noisiness)*relativeE;
#else
    info->activity = .5*(1+frame_noisiness-frame_stationarity);
#endif
    frame_tonality = (max_frame_tonality/(NB_TBANDS-NB_TONAL_SKIP_BANDS));
    frame_tonality = MAX16(frame_tonality, tonal->prev_tonality*.8f);
    tonal->prev_tonality = frame_tonality;

    slope /= 8*8;
    info->tonality_slope = slope;

    tonal->E_count = (tonal->E_count+1)%NB_FRAMES;
    tonal->count = IMIN(tonal->count+1, ANALYSIS_COUNT_MAX);
    info->tonality = frame_tonality;

    for (i=0;i<4;i++)
       features[i] = -0.12299f*(BFCC[i]+tonal->mem[i+24]) + 0.49195f*(tonal->mem[i]+tonal->mem[i+16]) + 0.69693f*tonal->mem[i+8] - 1.4349f*tonal->cmean[i];

    for (i=0;i<4;i++)
       tonal->cmean[i] = (1-alpha)*tonal->cmean[i] + alpha*BFCC[i];

    for (i=0;i<4;i++)
        features[4+i] = 0.63246f*(BFCC[i]-tonal->mem[i+24]) + 0.31623f*(tonal->mem[i]-tonal->mem[i+16]);
    for (i=0;i<3;i++)
        features[8+i] = 0.53452f*(BFCC[i]+tonal->mem[i+24]) - 0.26726f*(tonal->mem[i]+tonal->mem[i+16]) -0.53452f*tonal->mem[i+8];

    if (tonal->count > 5)
    {
       for (i=0;i<9;i++)
          tonal->std[i] = (1-alpha)*tonal->std[i] + alpha*features[i]*features[i];
    }
    for (i=0;i<4;i++)
       features[i] = BFCC[i]-midE[i];

    for (i=0;i<8;i++)
    {
       tonal->mem[i+24] = tonal->mem[i+16];
       tonal->mem[i+16] = tonal->mem[i+8];
       tonal->mem[i+8] = tonal->mem[i];
       tonal->mem[i] = BFCC[i];
    }
    for (i=0;i<9;i++)
       features[11+i] = (float)sqrt(tonal->std[i]) - std_feature_bias[i];
    features[18] = spec_variability - 0.78f;
    features[20] = info->tonality - 0.154723f;
    features[21] = info->activity - 0.724643f;
    features[22] = frame_stationarity - 0.743717f;
    features[23] = info->tonality_slope + 0.069216f;
    features[24] = tonal->lowECount - 0.067930f;

    mlp_process(&net, features, frame_probs);
    frame_probs[0] = .5f*(frame_probs[0]+1);
    /* Curve fitting between the MLP probability and the actual probability */
    /*frame_probs[0] = .01f + 1.21f*frame_probs[0]*frame_probs[0] - .23f*(float)pow(frame_probs[0], 10);*/
    /* Probability of active audio (as opposed to silence) */
    frame_probs[1] = .5f*frame_probs[1]+.5f;
    frame_probs[1] *= frame_probs[1];

    /* Probability of speech or music vs noise */
    info->activity_probability = frame_probs[1];

    /*printf("%f %f\n", frame_probs[0], frame_probs[1]);*/
    {
       /* Probability of state transition */
       float tau;
       /* Represents independence of the MLP probabilities, where
          beta=1 means fully independent. */
       float beta;
       /* Denormalized probability of speech (p0) and music (p1) after update */
       float p0, p1;
       /* Probabilities for "all speech" and "all music" */
       float s0, m0;
       /* Probability sum for renormalisation */
       float psum;
       /* Instantaneous probability of speech and music, with beta pre-applied. */
       float speech0;
       float music0;
       float p, q;

       /* More silence transitions for speech than for music. */
       tau = .001f*tonal->music_prob + .01f*(1-tonal->music_prob);
       p = MAX16(.05f,MIN16(.95f,frame_probs[1]));
       q = MAX16(.05f,MIN16(.95f,tonal->vad_prob));
       beta = .02f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p));
       /* p0 and p1 are the probabilities of speech and music at this frame
          using only information from previous frame and applying the
          state transition model */
       p0 = (1-tonal->vad_prob)*(1-tau) +    tonal->vad_prob *tau;
       p1 =    tonal->vad_prob *(1-tau) + (1-tonal->vad_prob)*tau;
       /* We apply the current probability with exponent beta to work around
          the fact that the probability estimates aren't independent. */
       p0 *= (float)pow(1-frame_probs[1], beta);
       p1 *= (float)pow(frame_probs[1], beta);
       /* Normalise the probabilities to get the Markov probability of music. */
       tonal->vad_prob = p1/(p0+p1);
       info->vad_prob = tonal->vad_prob;
       /* Consider that silence has a 50-50 probability of being speech or music. */
       frame_probs[0] = tonal->vad_prob*frame_probs[0] + (1-tonal->vad_prob)*.5f;

       /* One transition every 3 minutes of active audio */
       tau = .0001f;
       /* Adapt beta based on how "unexpected" the new prob is */
       p = MAX16(.05f,MIN16(.95f,frame_probs[0]));
       q = MAX16(.05f,MIN16(.95f,tonal->music_prob));
       beta = .02f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p));
       /* p0 and p1 are the probabilities of speech and music at this frame
          using only information from previous frame and applying the
          state transition model */
       p0 = (1-tonal->music_prob)*(1-tau) +    tonal->music_prob *tau;
       p1 =    tonal->music_prob *(1-tau) + (1-tonal->music_prob)*tau;
       /* We apply the current probability with exponent beta to work around
          the fact that the probability estimates aren't independent. */
       p0 *= (float)pow(1-frame_probs[0], beta);
       p1 *= (float)pow(frame_probs[0], beta);
       /* Normalise the probabilities to get the Markov probability of music. */
       tonal->music_prob = p1/(p0+p1);
       info->music_prob = tonal->music_prob;

       /*printf("%f %f %f %f\n", frame_probs[0], frame_probs[1], tonal->music_prob, tonal->vad_prob);*/
       /* This chunk of code deals with delayed decision. */
       psum=1e-20f;
       /* Instantaneous probability of speech and music, with beta pre-applied. */
       speech0 = (float)pow(1-frame_probs[0], beta);
       music0  = (float)pow(frame_probs[0], beta);
       if (tonal->count==1)
       {
          if (tonal->application == OPUS_APPLICATION_VOIP)
             tonal->pmusic[0] = .1f;
          else
             tonal->pmusic[0] = .625f;
          tonal->pspeech[0] = 1-tonal->pmusic[0];
       }
       /* Updated probability of having only speech (s0) or only music (m0),
          before considering the new observation. */
       s0 = tonal->pspeech[0] + tonal->pspeech[1];
       m0 = tonal->pmusic [0] + tonal->pmusic [1];
       /* Updates s0 and m0 with instantaneous probability. */
       tonal->pspeech[0] = s0*(1-tau)*speech0;
       tonal->pmusic [0] = m0*(1-tau)*music0;
       /* Propagate the transition probabilities */
       for (i=1;i<DETECT_SIZE-1;i++)
       {
          tonal->pspeech[i] = tonal->pspeech[i+1]*speech0;
          tonal->pmusic [i] = tonal->pmusic [i+1]*music0;
       }
       /* Probability that the latest frame is speech, when all the previous ones were music. */
       tonal->pspeech[DETECT_SIZE-1] = m0*tau*speech0;
       /* Probability that the latest frame is music, when all the previous ones were speech. */
       tonal->pmusic [DETECT_SIZE-1] = s0*tau*music0;

       /* Renormalise probabilities to 1 */
       for (i=0;i<DETECT_SIZE;i++)
          psum += tonal->pspeech[i] + tonal->pmusic[i];
       psum = 1.f/psum;
       for (i=0;i<DETECT_SIZE;i++)
       {
          tonal->pspeech[i] *= psum;
          tonal->pmusic [i] *= psum;
       }
       psum = tonal->pmusic[0];
       for (i=1;i<DETECT_SIZE;i++)
          psum += tonal->pspeech[i];

       /* Estimate our confidence in the speech/music decisions */
       if (frame_probs[1]>.75)
       {
          if (tonal->music_prob>.9)
          {
             float adapt;
             adapt = 1.f/(++tonal->music_confidence_count);
             tonal->music_confidence_count = IMIN(tonal->music_confidence_count, 500);
             tonal->music_confidence += adapt*MAX16(-.2f,frame_probs[0]-tonal->music_confidence);
          }
          if (tonal->music_prob<.1)
          {
             float adapt;
             adapt = 1.f/(++tonal->speech_confidence_count);
             tonal->speech_confidence_count = IMIN(tonal->speech_confidence_count, 500);
             tonal->speech_confidence += adapt*MIN16(.2f,frame_probs[0]-tonal->speech_confidence);
          }
       }
    }
    tonal->last_music = tonal->music_prob>.5f;
#ifdef MLP_TRAINING
    for (i=0;i<25;i++)
       printf("%f ", features[i]);
    printf("\n");
#endif

    info->bandwidth = bandwidth;
    tonal->prev_bandwidth = bandwidth;
    /*printf("%d %d\n", info->bandwidth, info->opus_bandwidth);*/
    info->noisiness = frame_noisiness;
    info->valid = 1;
    RESTORE_STACK;
}
Example #13
static int quant_coarse_energy_impl(const CELTMode *m, int start, int end,
      const opus_val16 *eBands, opus_val16 *oldEBands,
      opus_int32 budget, opus_int32 tell,
      const unsigned char *prob_model, opus_val16 *error, ec_enc *enc,
      int C, int LM, int intra, opus_val16 max_decay)
{
   int i, c;
   int badness = 0;
   opus_val32 prev[2] = {0,0};
   opus_val16 coef;
   opus_val16 beta;

   if (tell+3 <= budget)
      ec_enc_bit_logp(enc, intra, 3);
   if (intra)
   {
      coef = 0;
      beta = beta_intra;
   } else {
      beta = beta_coef[LM];
      coef = pred_coef[LM];
   }

   /* Encode at a fixed coarse resolution */
   for (i=start;i<end;i++)
   {
      c=0;
      do {
         int bits_left;
         int qi, qi0;
         opus_val32 q;
         opus_val16 x;
         opus_val32 f, tmp;
         opus_val16 oldE;
         opus_val16 decay_bound;
         x = eBands[i+c*m->nbEBands];
         oldE = MAX16(-QCONST16(9.f,DB_SHIFT), oldEBands[i+c*m->nbEBands]);
#ifdef FIXED_POINT
         f = SHL32(EXTEND32(x),7) - PSHR32(MULT16_16(coef,oldE), 8) - prev[c];
         /* Rounding to nearest integer here is really important! */
         qi = (f+QCONST32(.5f,DB_SHIFT+7))>>(DB_SHIFT+7);
         decay_bound = EXTRACT16(MAX32(-QCONST16(28.f,DB_SHIFT),
               SUB32((opus_val32)oldEBands[i+c*m->nbEBands],max_decay)));
#else
         f = x-coef*oldE-prev[c];
         /* Rounding to nearest integer here is really important! */
         qi = (int)floor(.5f+f);
         decay_bound = MAX16(-QCONST16(28.f,DB_SHIFT), oldEBands[i+c*m->nbEBands]) - max_decay;
#endif
         /* Prevent the energy from going down too quickly (e.g. for bands
            that have just one bin) */
         if (qi < 0 && x < decay_bound)
         {
            qi += (int)SHR16(SUB16(decay_bound,x), DB_SHIFT);
            if (qi > 0)
               qi = 0;
         }
         qi0 = qi;
         /* If we don't have enough bits to encode all the energy, just assume
             something safe. */
         tell = ec_tell(enc);
         bits_left = budget-tell-3*C*(end-i);
         if (i!=start && bits_left < 30)
         {
            if (bits_left < 24)
               qi = IMIN(1, qi);
            if (bits_left < 16)
               qi = IMAX(-1, qi);
         }
         if (budget-tell >= 15)
         {
            int pi;
            pi = 2*IMIN(i,20);
            ec_laplace_encode(enc, &qi,
                  prob_model[pi]<<7, prob_model[pi+1]<<6);
         }
         else if(budget-tell >= 2)
         {
            qi = IMAX(-1, IMIN(qi, 1));
            ec_enc_icdf(enc, 2*qi^-(qi<0), small_energy_icdf, 2);
         }
         else if(budget-tell >= 1)
         {
            qi = IMIN(0, qi);
            ec_enc_bit_logp(enc, -qi, 1);
         }
         else
            qi = -1;
         error[i+c*m->nbEBands] = PSHR32(f,7) - SHL16(qi,DB_SHIFT);
         badness += abs(qi0-qi);
         q = (opus_val32)SHL32(EXTEND32(qi),DB_SHIFT);

         tmp = PSHR32(MULT16_16(coef,oldE),8) + prev[c] + SHL32(q,7);
#ifdef FIXED_POINT
         tmp = MAX32(-QCONST32(28.f, DB_SHIFT+7), tmp);
#endif
         oldEBands[i+c*m->nbEBands] = PSHR32(tmp, 7);
         prev[c] = prev[c] + SHL32(q,7) - MULT16_16(beta,PSHR32(q,8));
      } while (++c < C);
   }
   return badness;
}
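In the float branch, each band's coarse step is a two-dimensional prediction (coef across frames, prev across bands) followed by rounding the residual to an integer. A minimal sketch of one band/channel update mirroring the non-FIXED_POINT path; it omits the decay clamp and the bit-budget fallbacks:

#include <math.h>

/* One-band coarse energy prediction/quantization step (float form):
   x is the new log-energy, *oldE the previous frame's quantized value,
   *prev the running cross-band predictor. Returns the integer residual
   that would be entropy-coded. */
static int coarse_step_sketch(float x, float *oldE, float *prev,
                              float coef, float beta)
{
   float f  = x - coef*(*oldE) - *prev;   /* prediction residual */
   int   qi = (int)floor(.5f + f);        /* round to nearest */
   float q  = (float)qi;
   *oldE = coef*(*oldE) + *prev + q;      /* reconstructed energy */
   *prev = *prev + q - beta*q;            /* update cross-band predictor */
   return qi;
}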
Example #14
void surround_analysis(const CELTMode *celt_mode, const void *pcm, opus_val16 *bandLogE, opus_val32 *mem, opus_val32 *preemph_mem,
      int len, int overlap, int channels, int rate, opus_copy_channel_in_func copy_channel_in, int arch
)
{
   int c;
   int i;
   int LM;
   int pos[8] = {0};
   int upsample;
   int frame_size;
   int freq_size;
   opus_val16 channel_offset;
   opus_val32 bandE[21];
   opus_val16 maskLogE[3][21];
   VARDECL(opus_val32, in);
   VARDECL(opus_val16, x);
   VARDECL(opus_val32, freq);
   SAVE_STACK;

   upsample = resampling_factor(rate);
   frame_size = len*upsample;
   freq_size = IMIN(960, frame_size);

   /* LM = log2(frame_size / 120) */
   for (LM=0;LM<celt_mode->maxLM;LM++)
      if (celt_mode->shortMdctSize<<LM==frame_size)
         break;

   ALLOC(in, frame_size+overlap, opus_val32);
   ALLOC(x, len, opus_val16);
   ALLOC(freq, freq_size, opus_val32);

   channel_pos(channels, pos);

   for (c=0;c<3;c++)
      for (i=0;i<21;i++)
         maskLogE[c][i] = -QCONST16(28.f, DB_SHIFT);

   for (c=0;c<channels;c++)
   {
      int frame;
      int nb_frames = frame_size/freq_size;
      celt_assert(nb_frames*freq_size == frame_size);
      OPUS_COPY(in, mem+c*overlap, overlap);
      (*copy_channel_in)(x, 1, pcm, channels, c, len);
      celt_preemphasis(x, in+overlap, frame_size, 1, upsample, celt_mode->preemph, preemph_mem+c, 0);
#ifndef FIXED_POINT
      {
         opus_val32 sum;
         sum = celt_inner_prod(in, in, frame_size+overlap, 0);
         /* This should filter out both NaNs and ridiculous signals that could
            cause NaNs further down. */
         if (!(sum < 1e9f) || celt_isnan(sum))
         {
            OPUS_CLEAR(in, frame_size+overlap);
            preemph_mem[c] = 0;
         }
      }
#endif
      OPUS_CLEAR(bandE, 21);
      for (frame=0;frame<nb_frames;frame++)
      {
         opus_val32 tmpE[21];
         clt_mdct_forward(&celt_mode->mdct, in+960*frame, freq, celt_mode->window,
               overlap, celt_mode->maxLM-LM, 1, arch);
         if (upsample != 1)
         {
            int bound = freq_size/upsample;
            for (i=0;i<bound;i++)
               freq[i] *= upsample;
            for (;i<freq_size;i++)
               freq[i] = 0;
         }

         compute_band_energies(celt_mode, freq, tmpE, 21, 1, LM);
         /* If we have multiple frames, take the max energy. */
         for (i=0;i<21;i++)
            bandE[i] = MAX32(bandE[i], tmpE[i]);
      }
      amp2Log2(celt_mode, 21, 21, bandE, bandLogE+21*c, 1);
      /* Apply spreading function with -6 dB/band going up and -12 dB/band going down. */
      for (i=1;i<21;i++)
         bandLogE[21*c+i] = MAX16(bandLogE[21*c+i], bandLogE[21*c+i-1]-QCONST16(1.f, DB_SHIFT));
      for (i=19;i>=0;i--)
         bandLogE[21*c+i] = MAX16(bandLogE[21*c+i], bandLogE[21*c+i+1]-QCONST16(2.f, DB_SHIFT));
      if (pos[c]==1)
      {
         for (i=0;i<21;i++)
            maskLogE[0][i] = logSum(maskLogE[0][i], bandLogE[21*c+i]);
      } else if (pos[c]==3)
      {
         for (i=0;i<21;i++)
            maskLogE[2][i] = logSum(maskLogE[2][i], bandLogE[21*c+i]);
      } else if (pos[c]==2)
      {
         for (i=0;i<21;i++)
         {
            maskLogE[0][i] = logSum(maskLogE[0][i], bandLogE[21*c+i]-QCONST16(.5f, DB_SHIFT));
            maskLogE[2][i] = logSum(maskLogE[2][i], bandLogE[21*c+i]-QCONST16(.5f, DB_SHIFT));
         }
      }
#if 0
      for (i=0;i<21;i++)
         printf("%f ", bandLogE[21*c+i]);
      float sum=0;
      for (i=0;i<21;i++)
         sum += bandLogE[21*c+i];
      printf("%f ", sum/21);
#endif
      OPUS_COPY(mem+c*overlap, in+frame_size, overlap);
   }
   for (i=0;i<21;i++)
      maskLogE[1][i] = MIN32(maskLogE[0][i],maskLogE[2][i]);
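   /* Normalize the masks by roughly 0.5*log2(2/(channels-1)) to account for the
      number of channels contributing to each mask */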
   channel_offset = HALF16(celt_log2(QCONST32(2.f,14)/(channels-1)));
   for (c=0;c<3;c++)
      for (i=0;i<21;i++)
         maskLogE[c][i] += channel_offset;
#if 0
   for (c=0;c<3;c++)
   {
      for (i=0;i<21;i++)
         printf("%f ", maskLogE[c][i]);
   }
#endif
   for (c=0;c<channels;c++)
   {
      opus_val16 *mask;
      if (pos[c]!=0)
      {
         mask = &maskLogE[pos[c]-1][0];
         for (i=0;i<21;i++)
            bandLogE[21*c+i] = bandLogE[21*c+i] - mask[i];
      } else {
         for (i=0;i<21;i++)
            bandLogE[21*c+i] = 0;
      }
#if 0
      for (i=0;i<21;i++)
         printf("%f ", bandLogE[21*c+i]);
      printf("\n");
#endif
#if 0
      float sum=0;
      for (i=0;i<21;i++)
         sum += bandLogE[21*c+i];
      printf("%f ", sum/(float)QCONST32(21.f, DB_SHIFT));
      printf("\n");
#endif
   }
   RESTORE_STACK;
}
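
surround_analysis above builds one masking curve per loudspeaker position: each channel's 21 band log-energies (log2 units, where 1.0 is roughly 6 dB) are smeared with a -6 dB/band slope towards higher bands and a -12 dB/band slope towards lower bands, folded into the left/centre/right masks with the log-domain sum logSum, and finally normalized and subtracted from the per-channel energies. A small float sketch of just the spreading and the log-domain accumulation, using a hypothetical log2_sum_sketch helper in place of the library's logSum, could look like:

#include <math.h>

/* Hypothetical float stand-in for the library's log-domain sum (logSum):
   returns log2(2^a + 2^b). Band energies here are in log2 units, where
   1.0 corresponds to roughly 6 dB, matching the slopes used above. */
static float log2_sum_sketch(float a, float b)
{
   float hi = a > b ? a : b;
   float lo = a > b ? b : a;
   return hi + log2f(1.f + exp2f(lo - hi));
}

/* Sketch: spread one channel's 21 band log-energies and fold them into the
   mask for its loudspeaker position. */
static void spread_and_accumulate_sketch(float *bandLogE, float *maskLogE)
{
   int i;
   /* -6 dB (1.0 in log2 units) per band going up in frequency */
   for (i = 1; i < 21; i++)
      bandLogE[i] = fmaxf(bandLogE[i], bandLogE[i-1] - 1.f);
   /* -12 dB (2.0 in log2 units) per band going down in frequency */
   for (i = 19; i >= 0; i--)
      bandLogE[i] = fmaxf(bandLogE[i], bandLogE[i+1] - 2.f);
   /* Accumulate into the position's mask in the log domain */
   for (i = 0; i < 21; i++)
      maskLogE[i] = log2_sum_sketch(maskLogE[i], bandLogE[i]);
}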
Example #15
0
/** Performs echo cancellation on a frame */
EXPORT void speex_echo_cancellation(SpeexEchoState *st, const spx_int16_t *in, const spx_int16_t *far_end, spx_int16_t *out)
{
   int i,j, chan, speak;
   int N,M, C, K;
   spx_word32_t Syy,See,Sxx,Sdd, Sff;
#ifdef TWO_PATH
   spx_word32_t Dbf;
   int update_foreground;
#endif
   spx_word32_t Sey;
   spx_word16_t ss, ss_1;
   spx_float_t Pey = FLOAT_ONE, Pyy=FLOAT_ONE;
   spx_float_t alpha, alpha_1;
   spx_word16_t RER;
   spx_word32_t tmp32;
   
   N = st->window_size;
   M = st->M;
   C = st->C;
   K = st->K;

   st->cancel_count++;
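   /* ss is the per-block smoothing rate used for the far-end power estimate
      (roughly 0.35/M); ss_1 is its complement. */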
#ifdef FIXED_POINT
   ss=DIV32_16(11469,M);
   ss_1 = SUB16(32767,ss);
#else
   ss=.35/M;
   ss_1 = 1-ss;
#endif

   for (chan = 0; chan < C; chan++)
   {
      /* Apply a notch filter to make sure DC doesn't end up causing problems */
      filter_dc_notch16(in+chan, st->notch_radius, st->input+chan*st->frame_size, st->frame_size, st->notch_mem+2*chan, C);
      /* Copy input data to the buffer and apply pre-emphasis */
      for (i=0;i<st->frame_size;i++)
      {
         spx_word32_t tmp32;
         /* FIXME: This core has changed a bit, need to merge properly */
         tmp32 = SUB32(EXTEND32(st->input[chan*st->frame_size+i]), EXTEND32(MULT16_16_P15(st->preemph, st->memD[chan])));
#ifdef FIXED_POINT
         if (tmp32 > 32767)
         {
            tmp32 = 32767;
            if (st->saturated == 0)
               st->saturated = 1;
         }      
         if (tmp32 < -32767)
         {
            tmp32 = -32767;
            if (st->saturated == 0)
               st->saturated = 1;
         }
#endif
         st->memD[chan] = st->input[chan*st->frame_size+i];
         st->input[chan*st->frame_size+i] = EXTRACT16(tmp32);
      }
   }

   for (speak = 0; speak < K; speak++)
   {
      for (i=0;i<st->frame_size;i++)
      {
         spx_word32_t tmp32;
         st->x[speak*N+i] = st->x[speak*N+i+st->frame_size];
         tmp32 = SUB32(EXTEND32(far_end[i*K+speak]), EXTEND32(MULT16_16_P15(st->preemph, st->memX[speak])));
#ifdef FIXED_POINT
         /*FIXME: If saturation occurs here, we need to freeze adaptation for M frames (not just one) */
         if (tmp32 > 32767)
         {
            tmp32 = 32767;
            st->saturated = M+1;
         }      
         if (tmp32 < -32767)
         {
            tmp32 = -32767;
            st->saturated = M+1;
         }      
#endif
         st->x[speak*N+i+st->frame_size] = EXTRACT16(tmp32);
         st->memX[speak] = far_end[i*K+speak];
      }
   }   
   
   for (speak = 0; speak < K; speak++)
   {
      /* Shift memory: this could be optimized eventually*/
      for (j=M-1;j>=0;j--)
      {
         for (i=0;i<N;i++)
            st->X[(j+1)*N*K+speak*N+i] = st->X[j*N*K+speak*N+i];
      }
      /* Convert x (echo input) to frequency domain */
      spx_fft(st->fft_table, st->x+speak*N, &st->X[speak*N]);
   }
   
   Sxx = 0;
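   /* Accumulate the far-end energy (Sxx) and its power spectrum (Xf) over all
      loudspeaker channels */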
   for (speak = 0; speak < K; speak++)
   {
      Sxx += mdf_inner_prod(st->x+speak*N+st->frame_size, st->x+speak*N+st->frame_size, st->frame_size);
      power_spectrum_accum(st->X+speak*N, st->Xf, N);
   }
   
   Sff = 0;  
   for (chan = 0; chan < C; chan++)
   {
#ifdef TWO_PATH
      /* Compute foreground filter */
      spectral_mul_accum16(st->X, st->foreground+chan*N*K*M, st->Y+chan*N, N, M*K);
      spx_ifft(st->fft_table, st->Y+chan*N, st->e+chan*N);
      for (i=0;i<st->frame_size;i++)
         st->e[chan*N+i] = SUB16(st->input[chan*st->frame_size+i], st->e[chan*N+i+st->frame_size]);
      Sff += mdf_inner_prod(st->e+chan*N, st->e+chan*N, st->frame_size);
#endif
   }
   
   /* Adjust the proportional adaptation rate */
   /* FIXME: Adjust that for C, K */
   if (st->adapted)
      mdf_adjust_prop (st->W, N, M, C*K, st->prop);
   /* Compute weight gradient */
   if (st->saturated == 0)
   {
      for (chan = 0; chan < C; chan++)
      {
         for (speak = 0; speak < K; speak++)
         {
            for (j=M-1;j>=0;j--)
            {
               weighted_spectral_mul_conj(st->power_1, FLOAT_SHL(PSEUDOFLOAT(st->prop[j]),-15), &st->X[(j+1)*N*K+speak*N], st->E+chan*N, st->PHI, N);
               for (i=0;i<N;i++)
                  st->W[chan*N*K*M + j*N*K + speak*N + i] += st->PHI[i];
            }
         }
      }
   } else {
      st->saturated--;
   }
   
   /* FIXME: MC conversion required */ 
   /* Update weight to prevent circular convolution (MDF / AUMDF) */
   for (chan = 0; chan < C; chan++)
   {
      for (speak = 0; speak < K; speak++)
      {
         for (j=0;j<M;j++)
         {
            /* This is a variant of the Alternatively Updated MDF (AUMDF) */
            /* Remove the "if" to make this an MDF filter */
            if (j==0 || st->cancel_count%(M-1) == j-1)
            {
#ifdef FIXED_POINT
               for (i=0;i<N;i++)
                  st->wtmp2[i] = EXTRACT16(PSHR32(st->W[chan*N*K*M + j*N*K + speak*N + i],NORMALIZE_SCALEDOWN+16));
               spx_ifft(st->fft_table, st->wtmp2, st->wtmp);
               for (i=0;i<st->frame_size;i++)
               {
                  st->wtmp[i]=0;
               }
               for (i=st->frame_size;i<N;i++)
               {
                  st->wtmp[i]=SHL16(st->wtmp[i],NORMALIZE_SCALEUP);
               }
               spx_fft(st->fft_table, st->wtmp, st->wtmp2);
               /* The "-1" in the shift is a sort of kludge that trades less efficient update speed for decrease noise */
               for (i=0;i<N;i++)
                  st->W[chan*N*K*M + j*N*K + speak*N + i] -= SHL32(EXTEND32(st->wtmp2[i]),16+NORMALIZE_SCALEDOWN-NORMALIZE_SCALEUP-1);
#else
               spx_ifft(st->fft_table, &st->W[chan*N*K*M + j*N*K + speak*N], st->wtmp);
               for (i=st->frame_size;i<N;i++)
               {
                  st->wtmp[i]=0;
               }
               spx_fft(st->fft_table, st->wtmp, &st->W[chan*N*K*M + j*N*K + speak*N]);
#endif
            }
         }
      }
   }
   
   /* Clear the spectrum accumulators so we can use power_spectrum_accum below */
   for (i=0;i<=st->frame_size;i++)
      st->Rf[i] = st->Yf[i] = st->Xf[i] = 0;
      
   Dbf = 0;
   See = 0;    
#ifdef TWO_PATH
   /* Difference in response; this is used to estimate the variance of our residual power estimate */
   for (chan = 0; chan < C; chan++)
   {
      spectral_mul_accum(st->X, st->W+chan*N*K*M, st->Y+chan*N, N, M*K);
      spx_ifft(st->fft_table, st->Y+chan*N, st->y+chan*N);
      for (i=0;i<st->frame_size;i++)
         st->e[chan*N+i] = SUB16(st->e[chan*N+i+st->frame_size], st->y[chan*N+i+st->frame_size]);
      Dbf += 10+mdf_inner_prod(st->e+chan*N, st->e+chan*N, st->frame_size);
      for (i=0;i<st->frame_size;i++)
         st->e[chan*N+i] = SUB16(st->input[chan*st->frame_size+i], st->y[chan*N+i+st->frame_size]);
      See += mdf_inner_prod(st->e+chan*N, st->e+chan*N, st->frame_size);
   }
#endif

#ifndef TWO_PATH
   Sff = See;
#endif

#ifdef TWO_PATH
   /* Logic for updating the foreground filter */
   
   /* Over two time windows, compute the mean of the energy difference as well as its variance */
   st->Davg1 = ADD32(MULT16_32_Q15(QCONST16(.6f,15),st->Davg1), MULT16_32_Q15(QCONST16(.4f,15),SUB32(Sff,See)));
   st->Davg2 = ADD32(MULT16_32_Q15(QCONST16(.85f,15),st->Davg2), MULT16_32_Q15(QCONST16(.15f,15),SUB32(Sff,See)));
   st->Dvar1 = FLOAT_ADD(FLOAT_MULT(VAR1_SMOOTH, st->Dvar1), FLOAT_MUL32U(MULT16_32_Q15(QCONST16(.4f,15),Sff), MULT16_32_Q15(QCONST16(.4f,15),Dbf)));
   st->Dvar2 = FLOAT_ADD(FLOAT_MULT(VAR2_SMOOTH, st->Dvar2), FLOAT_MUL32U(MULT16_32_Q15(QCONST16(.15f,15),Sff), MULT16_32_Q15(QCONST16(.15f,15),Dbf)));
   
   /* Equivalent float code:
   st->Davg1 = .6*st->Davg1 + .4*(Sff-See);
   st->Davg2 = .85*st->Davg2 + .15*(Sff-See);
   st->Dvar1 = .36*st->Dvar1 + .16*Sff*Dbf;
   st->Dvar2 = .7225*st->Dvar2 + .0225*Sff*Dbf;
   */
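   /* Davg1/Dvar1 use a shorter memory and react quickly; Davg2/Dvar2 average over
      a longer window, giving both a fast and a conservative significance test below. */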
   
   update_foreground = 0;
   /* Check if we have a statistically significant reduction in the residual echo */
   /* Note that this is *not* Gaussian, so we need to be careful about the longer tail */
   if (FLOAT_GT(FLOAT_MUL32U(SUB32(Sff,See),ABS32(SUB32(Sff,See))), FLOAT_MUL32U(Sff,Dbf)))
      update_foreground = 1;
   else if (FLOAT_GT(FLOAT_MUL32U(st->Davg1, ABS32(st->Davg1)), FLOAT_MULT(VAR1_UPDATE,(st->Dvar1))))
      update_foreground = 1;
   else if (FLOAT_GT(FLOAT_MUL32U(st->Davg2, ABS32(st->Davg2)), FLOAT_MULT(VAR2_UPDATE,(st->Dvar2))))
      update_foreground = 1;
   
   /* Do we update? */
   if (update_foreground)
   {
      st->Davg1 = st->Davg2 = 0;
      st->Dvar1 = st->Dvar2 = FLOAT_ZERO;
      /* Copy background filter to foreground filter */
      for (i=0;i<N*M*C*K;i++)
         st->foreground[i] = EXTRACT16(PSHR32(st->W[i],16));
      /* Apply a smooth transition so as not to introduce blocking artifacts */
      for (chan = 0; chan < C; chan++)
         for (i=0;i<st->frame_size;i++)
            st->e[chan*N+i+st->frame_size] = MULT16_16_Q15(st->window[i+st->frame_size],st->e[chan*N+i+st->frame_size]) + MULT16_16_Q15(st->window[i],st->y[chan*N+i+st->frame_size]);
   } else {
      int reset_background=0;
      /* Otherwise, check if the background filter is significantly worse */
      if (FLOAT_GT(FLOAT_MUL32U(NEG32(SUB32(Sff,See)),ABS32(SUB32(Sff,See))), FLOAT_MULT(VAR_BACKTRACK,FLOAT_MUL32U(Sff,Dbf))))
         reset_background = 1;
      if (FLOAT_GT(FLOAT_MUL32U(NEG32(st->Davg1), ABS32(st->Davg1)), FLOAT_MULT(VAR_BACKTRACK,st->Dvar1)))
         reset_background = 1;
      if (FLOAT_GT(FLOAT_MUL32U(NEG32(st->Davg2), ABS32(st->Davg2)), FLOAT_MULT(VAR_BACKTRACK,st->Dvar2)))
         reset_background = 1;
      if (reset_background)
      {
         /* Copy foreground filter to background filter */
         for (i=0;i<N*M*C*K;i++)
            st->W[i] = SHL32(EXTEND32(st->foreground[i]),16);
         /* We also need to copy the output so as to get correct adaptation */
         for (chan = 0; chan < C; chan++)
         {        
            for (i=0;i<st->frame_size;i++)
               st->y[chan*N+i+st->frame_size] = st->e[chan*N+i+st->frame_size];
            for (i=0;i<st->frame_size;i++)
               st->e[chan*N+i] = SUB16(st->input[chan*st->frame_size+i], st->y[chan*N+i+st->frame_size]);
         }        
         See = Sff;
         st->Davg1 = st->Davg2 = 0;
         st->Dvar1 = st->Dvar2 = FLOAT_ZERO;
      }
   }
#endif

   Sey = Syy = Sdd = 0;  
   for (chan = 0; chan < C; chan++)
   {    
      /* Compute error signal (for the output with de-emphasis) */ 
      for (i=0;i<st->frame_size;i++)
      {
         spx_word32_t tmp_out;
#ifdef TWO_PATH
         tmp_out = SUB32(EXTEND32(st->input[chan*st->frame_size+i]), EXTEND32(st->e[chan*N+i+st->frame_size]));
#else
         tmp_out = SUB32(EXTEND32(st->input[chan*st->frame_size+i]), EXTEND32(st->y[chan*N+i+st->frame_size]));
#endif
         tmp_out = ADD32(tmp_out, EXTEND32(MULT16_16_P15(st->preemph, st->memE[chan])));
         /* This is an arbitrary test for saturation in the microphone signal */
         if (in[i*C+chan] <= -32000 || in[i*C+chan] >= 32000)
         {
            if (st->saturated == 0)
               st->saturated = 1;
         }
         out[i*C+chan] = WORD2INT(tmp_out);
         st->memE[chan] = tmp_out;
      }

#ifdef DUMP_ECHO_CANCEL_DATA
      dump_audio(in, far_end, out, st->frame_size);
#endif
   
      /* Compute error signal (filter update version) */ 
      for (i=0;i<st->frame_size;i++)
      {
         st->e[chan*N+i+st->frame_size] = st->e[chan*N+i];
         st->e[chan*N+i] = 0;
      }
      
      /* Compute a bunch of correlations */
      /* FIXME: bad merge */
      Sey += mdf_inner_prod(st->e+chan*N+st->frame_size, st->y+chan*N+st->frame_size, st->frame_size);
      Syy += mdf_inner_prod(st->y+chan*N+st->frame_size, st->y+chan*N+st->frame_size, st->frame_size);
      Sdd += mdf_inner_prod(st->input+chan*st->frame_size, st->input+chan*st->frame_size, st->frame_size);
      
      /* Convert error to frequency domain */
      spx_fft(st->fft_table, st->e+chan*N, st->E+chan*N);
      for (i=0;i<st->frame_size;i++)
         st->y[i+chan*N] = 0;
      spx_fft(st->fft_table, st->y+chan*N, st->Y+chan*N);
   
      /* Compute power spectrum of echo (X), error (E) and filter response (Y) */
      power_spectrum_accum(st->E+chan*N, st->Rf, N);
      power_spectrum_accum(st->Y+chan*N, st->Yf, N);
    
   }
   
   /*printf ("%f %f %f %f\n", Sff, See, Syy, Sdd, st->update_cond);*/
   
   /* Do some sanity checks */
   if (!(Syy>=0 && Sxx>=0 && See >= 0)
#ifndef FIXED_POINT
       || !(Sff < N*1e9 && Syy < N*1e9 && Sxx < N*1e9)
#endif
      )
   {
      /* Things have gone really bad */
      st->screwed_up += 50;
      for (i=0;i<st->frame_size*C;i++)
         out[i] = 0;
   } else if (SHR32(Sff, 2) > ADD32(Sdd, SHR32(MULT16_16(N, 10000),6)))
   {
      /* The AEC seems to add lots of echo instead of removing it; let's see if it improves */
      st->screwed_up++;
   } else {
      /* Everything's fine */
      st->screwed_up=0;
   }
   if (st->screwed_up>=50)
   {
      speex_warning("The echo canceller started acting funny and got slapped (reset). It swears it will behave now.");
      speex_echo_state_reset(st);
      return;
   }

   /* Add a small noise floor to avoid problems when dividing */
   See = MAX32(See, SHR32(MULT16_16(N, 100),6));
     
   for (speak = 0; speak < K; speak++)
   {
      Sxx += mdf_inner_prod(st->x+speak*N+st->frame_size, st->x+speak*N+st->frame_size, st->frame_size);
      power_spectrum_accum(st->X+speak*N, st->Xf, N);
   }

   
   /* Smooth far end energy estimate over time */
   for (j=0;j<=st->frame_size;j++)
      st->power[j] = MULT16_32_Q15(ss_1,st->power[j]) + 1 + MULT16_32_Q15(ss,st->Xf[j]);

   /* Compute filtered spectra and (cross-)correlations */
   for (j=st->frame_size;j>=0;j--)
   {
      spx_float_t Eh, Yh;
      Eh = PSEUDOFLOAT(st->Rf[j] - st->Eh[j]);
      Yh = PSEUDOFLOAT(st->Yf[j] - st->Yh[j]);
      Pey = FLOAT_ADD(Pey,FLOAT_MULT(Eh,Yh));
      Pyy = FLOAT_ADD(Pyy,FLOAT_MULT(Yh,Yh));
#ifdef FIXED_POINT
      st->Eh[j] = MAC16_32_Q15(MULT16_32_Q15(SUB16(32767,st->spec_average),st->Eh[j]), st->spec_average, st->Rf[j]);
      st->Yh[j] = MAC16_32_Q15(MULT16_32_Q15(SUB16(32767,st->spec_average),st->Yh[j]), st->spec_average, st->Yf[j]);
#else
      st->Eh[j] = (1-st->spec_average)*st->Eh[j] + st->spec_average*st->Rf[j];
      st->Yh[j] = (1-st->spec_average)*st->Yh[j] + st->spec_average*st->Yf[j];
#endif
   }
   
   Pyy = FLOAT_SQRT(Pyy);
   Pey = FLOAT_DIVU(Pey,Pyy);
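   /* st->Pey/st->Pyy below gives the linear-regression (leakage) coefficient of the
      error spectrum on the estimated echo spectrum, i.e. how much of the estimated
      echo remains in the error */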

   /* Compute the correlation update rate */
   tmp32 = MULT16_32_Q15(st->beta0,Syy);
   if (tmp32 > MULT16_32_Q15(st->beta_max,See))
      tmp32 = MULT16_32_Q15(st->beta_max,See);
   alpha = FLOAT_DIV32(tmp32, See);
   alpha_1 = FLOAT_SUB(FLOAT_ONE, alpha);
   /* Update correlations (recursive average) */
   st->Pey = FLOAT_ADD(FLOAT_MULT(alpha_1,st->Pey) , FLOAT_MULT(alpha,Pey));
   st->Pyy = FLOAT_ADD(FLOAT_MULT(alpha_1,st->Pyy) , FLOAT_MULT(alpha,Pyy));
   if (FLOAT_LT(st->Pyy, FLOAT_ONE))
      st->Pyy = FLOAT_ONE;
   /* We don't really hope to get better than 33 dB (MIN_LEAK-3dB) attenuation anyway */
   if (FLOAT_LT(st->Pey, FLOAT_MULT(MIN_LEAK,st->Pyy)))
      st->Pey = FLOAT_MULT(MIN_LEAK,st->Pyy);
   if (FLOAT_GT(st->Pey, st->Pyy))
      st->Pey = st->Pyy;
   /* leak_estimate is the linear regression result */
   st->leak_estimate = FLOAT_EXTRACT16(FLOAT_SHL(FLOAT_DIVU(st->Pey, st->Pyy),14));
   /* This looks like a stupid bug, but it's right (because we convert from Q14 to Q15) */
   if (st->leak_estimate > 16383)
      st->leak_estimate = 32767;
   else
      st->leak_estimate = SHL16(st->leak_estimate,1);
   /*printf ("%f\n", st->leak_estimate);*/
   
   /* Compute Residual to Error Ratio */
#ifdef FIXED_POINT
   tmp32 = MULT16_32_Q15(st->leak_estimate,Syy);
   tmp32 = ADD32(SHR32(Sxx,13), ADD32(tmp32, SHL32(tmp32,1)));
   /* Check for y in e (lower bound on RER) */
   {
      spx_float_t bound = PSEUDOFLOAT(Sey);
      bound = FLOAT_DIVU(FLOAT_MULT(bound, bound), PSEUDOFLOAT(ADD32(1,Syy)));
      if (FLOAT_GT(bound, PSEUDOFLOAT(See)))
         tmp32 = See;
      else if (tmp32 < FLOAT_EXTRACT32(bound))
         tmp32 = FLOAT_EXTRACT32(bound);
   }
   if (tmp32 > SHR32(See,1))
      tmp32 = SHR32(See,1);
   RER = FLOAT_EXTRACT16(FLOAT_SHL(FLOAT_DIV32(tmp32,See),15));
#else
   RER = (.0001*Sxx + 3.*MULT16_32_Q15(st->leak_estimate,Syy)) / See;
   /* Check for y in e (lower bound on RER) */
   if (RER < Sey*Sey/(1+See*Syy))
      RER = Sey*Sey/(1+See*Syy);
   if (RER > .5)
      RER = .5;
#endif

   /* We consider that the filter has had at least minimal adaptation if the following is true */
   if (!st->adapted && st->sum_adapt > SHL32(EXTEND32(M),15) && MULT16_32_Q15(st->leak_estimate,Syy) > MULT16_32_Q15(QCONST16(.03f,15),Syy))
   {
      st->adapted = 1;
   }

   if (st->adapted)
   {
      /* Normal learning rate calculation once we're past the minimal adaptation phase */
      for (i=0;i<=st->frame_size;i++)
      {
         spx_word32_t r, e;
         /* Compute frequency-domain adaptation mask */
         r = MULT16_32_Q15(st->leak_estimate,SHL32(st->Yf[i],3));
         e = SHL32(st->Rf[i],3)+1;
#ifdef FIXED_POINT
         if (r>SHR32(e,1))
            r = SHR32(e,1);
#else
         if (r>.5*e)
            r = .5*e;
#endif
         r = MULT16_32_Q15(QCONST16(.7,15),r) + MULT16_32_Q15(QCONST16(.3,15),(spx_word32_t)(MULT16_32_Q15(RER,e)));
         /*st->power_1[i] = adapt_rate*r/(e*(1+st->power[i]));*/
         st->power_1[i] = FLOAT_SHL(FLOAT_DIV32_FLOAT(r,FLOAT_MUL32U(e,st->power[i]+10)),WEIGHT_SHIFT+16);
      }
   } else {
      /* Temporary adaptation rate while the filter is not yet adapted enough */
      spx_word16_t adapt_rate=0;

      if (Sxx > SHR32(MULT16_16(N, 1000),6)) 
      {
         tmp32 = MULT16_32_Q15(QCONST16(.25f, 15), Sxx);
#ifdef FIXED_POINT
         if (tmp32 > SHR32(See,2))
            tmp32 = SHR32(See,2);
#else
         if (tmp32 > .25*See)
            tmp32 = .25*See;
#endif
         adapt_rate = FLOAT_EXTRACT16(FLOAT_SHL(FLOAT_DIV32(tmp32, See),15));
      }
      for (i=0;i<=st->frame_size;i++)
         st->power_1[i] = FLOAT_SHL(FLOAT_DIV32(EXTEND32(adapt_rate),ADD32(st->power[i],10)),WEIGHT_SHIFT+1);


      /* How much have we adapted so far? */
      st->sum_adapt = ADD32(st->sum_adapt,adapt_rate);
   }

   /* FIXME: MC conversion required */
   for (i=0;i<st->frame_size;i++)
      st->last_y[i] = st->last_y[st->frame_size+i];
   if (st->adapted)
   {
      /* If the filter is adapted, take the filtered echo */
      for (i=0;i<st->frame_size;i++)
         st->last_y[st->frame_size+i] = in[i]-out[i];
   } else {
      /* If filter isn't adapted yet, all we can do is take the far end signal directly */
      /* moved earlier: for (i=0;i<N;i++)
      st->last_y[i] = st->x[i];*/
   }

}
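
The TWO_PATH logic above keeps a fast-adapting background filter (st->W) and a conservative foreground filter (st->foreground): the foreground is only overwritten when the background produces a statistically significant drop in residual energy (Sff - See large relative to its estimated variance), and the background is rolled back to the foreground when it does significantly worse. Based on the "Equivalent float code" comment embedded in the source, a hypothetical float-only sketch of just that decision could read:

#include <math.h>

/* Sketch (float only): two-path update decision, following the equivalent
   float code quoted in the source. Sff and See are the foreground and
   background residual energies for the frame, Dbf the foreground/background
   difference energy. The state struct and the threshold parameters are
   hypothetical stand-ins for the fields and the VAR1_UPDATE/VAR2_UPDATE/
   VAR_BACKTRACK constants used above. */
typedef struct {
   float davg1, davg2;   /* smoothed energy improvement (fast / slow window) */
   float dvar1, dvar2;   /* matching variance estimates */
} two_path_state_sketch;

/* Returns 1 to copy the background filter into the foreground, -1 to reset
   the background from the foreground, 0 to leave both alone. */
static int two_path_decision_sketch(two_path_state_sketch *s,
                                    float Sff, float See, float Dbf,
                                    float var1_update, float var2_update,
                                    float var_backtrack)
{
   float d = Sff - See;  /* how much the background filter reduced the residual */
   s->davg1 = .6f*s->davg1 + .4f*d;
   s->davg2 = .85f*s->davg2 + .15f*d;
   s->dvar1 = .36f*s->dvar1 + .16f*Sff*Dbf;
   s->dvar2 = .7225f*s->dvar2 + .0225f*Sff*Dbf;

   /* Statistically significant improvement: promote the background filter */
   if (d*fabsf(d) > Sff*Dbf
       || s->davg1*fabsf(s->davg1) > var1_update*s->dvar1
       || s->davg2*fabsf(s->davg2) > var2_update*s->dvar2)
      return 1;

   /* Background significantly worse: back off to the foreground filter */
   if (-d*fabsf(d) > var_backtrack*Sff*Dbf
       || -s->davg1*fabsf(s->davg1) > var_backtrack*s->dvar1
       || -s->davg2*fabsf(s->davg2) > var_backtrack*s->dvar2)
      return -1;

   return 0;
}

As in the source, the caller would also clear the smoothed statistics after either outcome, and on a foreground update cross-fade the two outputs so the switch does not introduce blocking artifacts.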