Example No. 1
void tonality_get_info(TonalityAnalysisState *tonal, AnalysisInfo *info_out, int len)
{
   int pos;
   int curr_lookahead;
   float psum;
   int i;

   pos = tonal->read_pos;
   curr_lookahead = tonal->write_pos-tonal->read_pos;
   if (curr_lookahead<0)
      curr_lookahead += DETECT_SIZE;

   /* On long frames, look at the second analysis window rather than the first. */
   if (len > tonal->Fs/50 && pos != tonal->write_pos)
   {
      pos++;
      if (pos==DETECT_SIZE)
         pos=0;
   }
   if (pos == tonal->write_pos)
      pos--;
   if (pos<0)
      pos = DETECT_SIZE-1;
   OPUS_COPY(info_out, &tonal->info[pos], 1);
   /* If possible, look ahead for a tone to compensate for the delay in the tone detector. */
   for (i=0;i<3;i++)
   {
      pos++;
      if (pos==DETECT_SIZE)
         pos = 0;
      if (pos == tonal->write_pos)
         break;
      info_out->tonality = MAX32(0, -.03f + MAX32(info_out->tonality, tonal->info[pos].tonality-.05f));
   }
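   /* Advance the read position by the number of 2.5 ms subframes (Fs/400 samples)
      consumed by this frame; eight subframes correspond to one analysis frame. */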
   tonal->read_subframe += len/(tonal->Fs/400);
   while (tonal->read_subframe>=8)
   {
      tonal->read_subframe -= 8;
      tonal->read_pos++;
   }
   if (tonal->read_pos>=DETECT_SIZE)
      tonal->read_pos-=DETECT_SIZE;

   /* The -1 is to compensate for the delay in the features themselves. */
   curr_lookahead = IMAX(curr_lookahead-1, 0);

   psum=0;
   /* Summing the probability of transition patterns that involve music at
      time (DETECT_SIZE-curr_lookahead-1) */
   for (i=0;i<DETECT_SIZE-curr_lookahead;i++)
      psum += tonal->pmusic[i];
   for (;i<DETECT_SIZE;i++)
      psum += tonal->pspeech[i];
   psum = psum*tonal->music_confidence + (1-psum)*tonal->speech_confidence;
   /*printf("%f %f %f %f %f\n", psum, info_out->music_prob, info_out->vad_prob, info_out->activity_probability, info_out->tonality);*/

   info_out->music_prob = psum;
}
Example No. 2
static opus_val32 downmix_and_resample(downmix_func downmix, const void *_x, opus_val32 *y, opus_val32 S[3], int subframe, int offset, int c1, int c2, int C, int Fs)
{
   VARDECL(opus_val32, tmp);
   opus_val32 scale;
   int j;
   opus_val32 ret = 0;
   SAVE_STACK;

   if (subframe==0) return 0;
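   /* subframe and offset are given in 24 kHz samples; convert them to the native rate so
      that the output y always ends up at 24 kHz after the resampling below (48 kHz is
      halved, 16 kHz is tripled and then halved). */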
   if (Fs == 48000)
   {
      subframe *= 2;
      offset *= 2;
   } else if (Fs == 16000) {
      subframe = subframe*2/3;
      offset = offset*2/3;
   }
   ALLOC(tmp, subframe, opus_val32);

   downmix(_x, tmp, subframe, offset, c1, c2, C);
#ifdef FIXED_POINT
   scale = (1<<SIG_SHIFT);
#else
   scale = 1.f/32768;
#endif
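   /* Compensate for the downmix gain: a sum over all C channels (c2==-2) is divided by C,
      a two-channel mix (c2>-1) by 2. */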
   if (c2==-2)
      scale /= C;
   else if (c2>-1)
      scale /= 2;
   for (j=0;j<subframe;j++)
      tmp[j] *= scale;
   if (Fs == 48000)
   {
      ret = silk_resampler_down2_hp(S, y, tmp, subframe);
   } else if (Fs == 24000) {
      OPUS_COPY(y, tmp, subframe);
   } else if (Fs == 16000) {
      VARDECL(opus_val32, tmp3x);
      ALLOC(tmp3x, 3*subframe, opus_val32);
      /* Don't do this at home! This resampler is horrible and it's only (barely)
         usable for the purpose of the analysis because we don't care about all
         the aliasing between 8 kHz and 12 kHz. */
      for (j=0;j<subframe;j++)
      {
         tmp3x[3*j] = tmp[j];
         tmp3x[3*j+1] = tmp[j];
         tmp3x[3*j+2] = tmp[j];
      }
      silk_resampler_down2_hp(S, y, tmp3x, 3*subframe);
   }
   RESTORE_STACK;
   return ret;
}
Example No. 3
void tonality_get_info(TonalityAnalysisState *tonal, AnalysisInfo *info_out, int len)
{
   int pos;
   int curr_lookahead;
   float psum;
   int i;

   pos = tonal->read_pos;
   curr_lookahead = tonal->write_pos-tonal->read_pos;
   if (curr_lookahead<0)
      curr_lookahead += DETECT_SIZE;

   if (len > 480 && pos != tonal->write_pos)
   {
      pos++;
      if (pos==DETECT_SIZE)
         pos=0;
   }
   if (pos == tonal->write_pos)
      pos--;
   if (pos<0)
      pos = DETECT_SIZE-1;
   OPUS_COPY(info_out, &tonal->info[pos], 1);
   tonal->read_subframe += len/120;
   while (tonal->read_subframe>=4)
   {
      tonal->read_subframe -= 4;
      tonal->read_pos++;
   }
   if (tonal->read_pos>=DETECT_SIZE)
      tonal->read_pos-=DETECT_SIZE;

   /* Compensate for the delay in the features themselves.
      FIXME: Need a better estimate; the 10 is just a guess. */
   curr_lookahead = IMAX(curr_lookahead-10, 0);

   psum=0;
   /* Summing the probability of transition patterns that involve music at
      time (DETECT_SIZE-curr_lookahead-1) */
   for (i=0;i<DETECT_SIZE-curr_lookahead;i++)
      psum += tonal->pmusic[i];
   for (;i<DETECT_SIZE;i++)
      psum += tonal->pspeech[i];
   psum = psum*tonal->music_confidence + (1-psum)*tonal->speech_confidence;
   /*printf("%f %f %f\n", psum, info_out->music_prob, info_out->tonality);*/

   info_out->music_prob = psum;
}
Example No. 4
void tonality_analysis(TonalityAnalysisState *tonal, AnalysisInfo *info_out, const CELTMode *celt_mode, const void *x, int len, int offset, int c1, int c2, int C, int lsb_depth, downmix_func downmix)
{
    int i, b;
    const kiss_fft_state *kfft;
    VARDECL(kiss_fft_cpx, in);
    VARDECL(kiss_fft_cpx, out);
    int N = 480, N2=240;
    float * OPUS_RESTRICT A = tonal->angle;
    float * OPUS_RESTRICT dA = tonal->d_angle;
    float * OPUS_RESTRICT d2A = tonal->d2_angle;
    VARDECL(float, tonality);
    VARDECL(float, noisiness);
    float band_tonality[NB_TBANDS];
    float logE[NB_TBANDS];
    float BFCC[8];
    float features[25];
    float frame_tonality;
    float max_frame_tonality;
    /*float tw_sum=0;*/
    float frame_noisiness;
    const float pi4 = (float)(M_PI*M_PI*M_PI*M_PI);
    float slope=0;
    float frame_stationarity;
    float relativeE;
    float frame_probs[2];
    float alpha, alphaE, alphaE2;
    float frame_loudness;
    float bandwidth_mask;
    int bandwidth=0;
    float maxE = 0;
    float noise_floor;
    int remaining;
    AnalysisInfo *info;
    SAVE_STACK;

    tonal->last_transition++;
    alpha = 1.f/IMIN(20, 1+tonal->count);
    alphaE = 1.f/IMIN(50, 1+tonal->count);
    alphaE2 = 1.f/IMIN(1000, 1+tonal->count);

    if (tonal->count<4)
       tonal->music_prob = .5;
    kfft = celt_mode->mdct.kfft[0];
    if (tonal->count==0)
       tonal->mem_fill = 240;
    downmix(x, &tonal->inmem[tonal->mem_fill], IMIN(len, ANALYSIS_BUF_SIZE-tonal->mem_fill), offset, c1, c2, C);
    if (tonal->mem_fill+len < ANALYSIS_BUF_SIZE)
    {
       tonal->mem_fill += len;
       /* Don't have enough to update the analysis */
       RESTORE_STACK;
       return;
    }
    info = &tonal->info[tonal->write_pos++];
    if (tonal->write_pos>=DETECT_SIZE)
       tonal->write_pos-=DETECT_SIZE;

    ALLOC(in, 480, kiss_fft_cpx);
    ALLOC(out, 480, kiss_fft_cpx);
    ALLOC(tonality, 240, float);
    ALLOC(noisiness, 240, float);
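    /* Window the two overlapping 480-sample analysis frames (offset by 240 samples) and
       pack them as the real and imaginary parts of one complex FFT input; the two
       spectra are separated again after the transform. */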
    for (i=0;i<N2;i++)
    {
       float w = analysis_window[i];
       in[i].r = (kiss_fft_scalar)(w*tonal->inmem[i]);
       in[i].i = (kiss_fft_scalar)(w*tonal->inmem[N2+i]);
       in[N-i-1].r = (kiss_fft_scalar)(w*tonal->inmem[N-i-1]);
       in[N-i-1].i = (kiss_fft_scalar)(w*tonal->inmem[N+N2-i-1]);
    }
    OPUS_MOVE(tonal->inmem, tonal->inmem+ANALYSIS_BUF_SIZE-240, 240);
    remaining = len - (ANALYSIS_BUF_SIZE-tonal->mem_fill);
    downmix(x, &tonal->inmem[240], remaining, offset+ANALYSIS_BUF_SIZE-tonal->mem_fill, c1, c2, C);
    tonal->mem_fill = 240 + remaining;
    opus_fft(kfft, in, out);

    for (i=1;i<N2;i++)
    {
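       /* Estimate per-bin tonality from the second derivative of the phase: a pure tone
          advances its phase linearly between frames, so d2_angle is close to an integer
          number of cycles and the fractional residue (mod1, mod2) is small; larger
          residues mean a noisier bin. */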
       float X1r, X2r, X1i, X2i;
       float angle, d_angle, d2_angle;
       float angle2, d_angle2, d2_angle2;
       float mod1, mod2, avg_mod;
       X1r = (float)out[i].r+out[N-i].r;
       X1i = (float)out[i].i-out[N-i].i;
       X2r = (float)out[i].i+out[N-i].i;
       X2i = (float)out[N-i].r-out[i].r;

       angle = (float)(.5f/M_PI)*fast_atan2f(X1i, X1r);
       d_angle = angle - A[i];
       d2_angle = d_angle - dA[i];

       angle2 = (float)(.5f/M_PI)*fast_atan2f(X2i, X2r);
       d_angle2 = angle2 - angle;
       d2_angle2 = d_angle2 - d_angle;

       mod1 = d2_angle - (float)floor(.5+d2_angle);
       noisiness[i] = ABS16(mod1);
       mod1 *= mod1;
       mod1 *= mod1;

       mod2 = d2_angle2 - (float)floor(.5+d2_angle2);
       noisiness[i] += ABS16(mod2);
       mod2 *= mod2;
       mod2 *= mod2;

       avg_mod = .25f*(d2A[i]+2.f*mod1+mod2);
       tonality[i] = 1.f/(1.f+40.f*16.f*pi4*avg_mod)-.015f;

       A[i] = angle2;
       dA[i] = d_angle2;
       d2A[i] = mod2;
    }

    frame_tonality = 0;
    max_frame_tonality = 0;
    /*tw_sum = 0;*/
    info->activity = 0;
    frame_noisiness = 0;
    frame_stationarity = 0;
    if (!tonal->count)
    {
       for (b=0;b<NB_TBANDS;b++)
       {
          tonal->lowE[b] = 1e10;
          tonal->highE[b] = -1e10;
       }
    }
    relativeE = 0;
    frame_loudness = 0;
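    /* Per-band analysis: accumulate energy, tonality-weighted energy and noisiness for
       each band, and track slow min/max envelopes (lowE/highE) to estimate the frame's
       relative energy. */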
    for (b=0;b<NB_TBANDS;b++)
    {
       float E=0, tE=0, nE=0;
       float L1, L2;
       float stationarity;
       for (i=tbands[b];i<tbands[b+1];i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
#ifdef FIXED_POINT
          /* FIXME: It's probably best to change the BFCC filter initial state instead */
          binE *= 5.55e-17f;
#endif
          E += binE;
          tE += binE*tonality[i];
          nE += binE*2.f*(.5f-noisiness[i]);
       }
       tonal->E[tonal->E_count][b] = E;
       frame_noisiness += nE/(1e-15f+E);

       frame_loudness += (float)sqrt(E+1e-10f);
       logE[b] = (float)log(E+1e-10f);
       tonal->lowE[b] = MIN32(logE[b], tonal->lowE[b]+.01f);
       tonal->highE[b] = MAX32(logE[b], tonal->highE[b]-.1f);
       if (tonal->highE[b] < tonal->lowE[b]+1.f)
       {
          tonal->highE[b]+=.5f;
          tonal->lowE[b]-=.5f;
       }
       relativeE += (logE[b]-tonal->lowE[b])/(1e-15f+tonal->highE[b]-tonal->lowE[b]);

       L1=L2=0;
       for (i=0;i<NB_FRAMES;i++)
       {
          L1 += (float)sqrt(tonal->E[i][b]);
          L2 += tonal->E[i][b];
       }

       stationarity = MIN16(0.99f,L1/(float)sqrt(1e-15+NB_FRAMES*L2));
       stationarity *= stationarity;
       stationarity *= stationarity;
       frame_stationarity += stationarity;
       /*band_tonality[b] = tE/(1e-15+E)*/
       band_tonality[b] = MAX16(tE/(1e-15f+E), stationarity*tonal->prev_band_tonality[b]);
#if 0
       if (b>=NB_TONAL_SKIP_BANDS)
       {
          frame_tonality += tweight[b]*band_tonality[b];
          tw_sum += tweight[b];
       }
#else
       frame_tonality += band_tonality[b];
       if (b>=NB_TBANDS-NB_TONAL_SKIP_BANDS)
          frame_tonality -= band_tonality[b-NB_TBANDS+NB_TONAL_SKIP_BANDS];
#endif
       max_frame_tonality = MAX16(max_frame_tonality, (1.f+.03f*(b-NB_TBANDS))*frame_tonality);
       slope += band_tonality[b]*(b-8);
       /*printf("%f %f ", band_tonality[b], stationarity);*/
       tonal->prev_band_tonality[b] = band_tonality[b];
    }

    bandwidth_mask = 0;
    bandwidth = 0;
    maxE = 0;
    noise_floor = 5.7e-4f/(1<<(IMAX(0,lsb_depth-8)));
#ifdef FIXED_POINT
    noise_floor *= 1<<(15+SIG_SHIFT);
#endif
    noise_floor *= noise_floor;
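    /* Find the effective bandwidth: the highest band whose energy passes the masking and
       noise-floor conditions listed below is retained. */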
    for (b=0;b<NB_TOT_BANDS;b++)
    {
       float E=0;
       int band_start, band_end;
       /* Keep a margin of 300 Hz for aliasing */
       band_start = extra_bands[b];
       band_end = extra_bands[b+1];
       for (i=band_start;i<band_end;i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
          E += binE;
       }
       maxE = MAX32(maxE, E);
       tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E);
       E = MAX32(E, tonal->meanE[b]);
       /* Use a simple follower with a 13 dB/Bark slope for the spreading function */
       bandwidth_mask = MAX32(.05f*bandwidth_mask, E);
       /* Consider the band "active" only if all these conditions are met:
          1) less than 10 dB below the simple follower
          2) less than 90 dB below the peak band (maximal masking possible considering
             both the ATH and the loudness-dependent slope of the spreading function)
          3) above the PCM quantization noise floor
       */
       if (E>.1*bandwidth_mask && E*1e9f > maxE && E > noise_floor*(band_end-band_start))
          bandwidth = b;
    }
    if (tonal->count<=2)
       bandwidth = 20;
    frame_loudness = 20*(float)log10(frame_loudness);
    tonal->Etracker = MAX32(tonal->Etracker-.03f, frame_loudness);
    tonal->lowECount *= (1-alphaE);
    if (frame_loudness < tonal->Etracker-30)
       tonal->lowECount += alphaE;

    for (i=0;i<8;i++)
    {
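       /* BFCC[i]: i-th Bark-frequency cepstral coefficient, a DCT of the log band energies. */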
       float sum=0;
       for (b=0;b<16;b++)
          sum += dct_table[i*16+b]*logE[b];
       BFCC[i] = sum;
    }

    frame_stationarity /= NB_TBANDS;
    relativeE /= NB_TBANDS;
    if (tonal->count<10)
       relativeE = .5;
    frame_noisiness /= NB_TBANDS;
#if 1
    info->activity = frame_noisiness + (1-frame_noisiness)*relativeE;
#else
    info->activity = .5*(1+frame_noisiness-frame_stationarity);
#endif
    frame_tonality = (max_frame_tonality/(NB_TBANDS-NB_TONAL_SKIP_BANDS));
    frame_tonality = MAX16(frame_tonality, tonal->prev_tonality*.8f);
    tonal->prev_tonality = frame_tonality;

    slope /= 8*8;
    info->tonality_slope = slope;

    tonal->E_count = (tonal->E_count+1)%NB_FRAMES;
    tonal->count++;
    info->tonality = frame_tonality;
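    /* Assemble the 25-dimensional feature vector for the speech/music MLP: smoothed and
       differenced cepstral features, their standard deviations, plus tonality, activity,
       stationarity, tonality slope and the low-energy counter. */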

    for (i=0;i<4;i++)
       features[i] = -0.12299f*(BFCC[i]+tonal->mem[i+24]) + 0.49195f*(tonal->mem[i]+tonal->mem[i+16]) + 0.69693f*tonal->mem[i+8] - 1.4349f*tonal->cmean[i];

    for (i=0;i<4;i++)
       tonal->cmean[i] = (1-alpha)*tonal->cmean[i] + alpha*BFCC[i];

    for (i=0;i<4;i++)
        features[4+i] = 0.63246f*(BFCC[i]-tonal->mem[i+24]) + 0.31623f*(tonal->mem[i]-tonal->mem[i+16]);
    for (i=0;i<3;i++)
        features[8+i] = 0.53452f*(BFCC[i]+tonal->mem[i+24]) - 0.26726f*(tonal->mem[i]+tonal->mem[i+16]) -0.53452f*tonal->mem[i+8];

    if (tonal->count > 5)
    {
       for (i=0;i<9;i++)
          tonal->std[i] = (1-alpha)*tonal->std[i] + alpha*features[i]*features[i];
    }

    for (i=0;i<8;i++)
    {
       tonal->mem[i+24] = tonal->mem[i+16];
       tonal->mem[i+16] = tonal->mem[i+8];
       tonal->mem[i+8] = tonal->mem[i];
       tonal->mem[i] = BFCC[i];
    }
    for (i=0;i<9;i++)
       features[11+i] = (float)sqrt(tonal->std[i]);
    features[20] = info->tonality;
    features[21] = info->activity;
    features[22] = frame_stationarity;
    features[23] = info->tonality_slope;
    features[24] = tonal->lowECount;

#ifndef DISABLE_FLOAT_API
    mlp_process(&net, features, frame_probs);
    frame_probs[0] = .5f*(frame_probs[0]+1);
    /* Curve fitting between the MLP probability and the actual probability */
    frame_probs[0] = .01f + 1.21f*frame_probs[0]*frame_probs[0] - .23f*(float)pow(frame_probs[0], 10);
    /* Probability of active audio (as opposed to silence) */
    frame_probs[1] = .5f*frame_probs[1]+.5f;
    /* Consider that silence has a 50-50 probability. */
    frame_probs[0] = frame_probs[1]*frame_probs[0] + (1-frame_probs[1])*.5f;

    /*printf("%f %f ", frame_probs[0], frame_probs[1]);*/
    {
       /* Probability of state transition */
       float tau;
       /* Represents independence of the MLP probabilities, where
          beta=1 means fully independent. */
       float beta;
       /* Denormalized probability of speech (p0) and music (p1) after update */
       float p0, p1;
       /* Probabilities for "all speech" and "all music" */
       float s0, m0;
       /* Probability sum for renormalisation */
       float psum;
       /* Instantaneous probability of speech and music, with beta pre-applied. */
       float speech0;
       float music0;

       /* One transition every 3 minutes of active audio */
       tau = .00005f*frame_probs[1];
       beta = .05f;
       if (1) {
          /* Adapt beta based on how "unexpected" the new prob is */
          float p, q;
          p = MAX16(.05f,MIN16(.95f,frame_probs[0]));
          q = MAX16(.05f,MIN16(.95f,tonal->music_prob));
          beta = .01f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p));
       }
       /* p0 and p1 are the probabilities of speech and music at this frame
          using only information from previous frame and applying the
          state transition model */
       p0 = (1-tonal->music_prob)*(1-tau) +    tonal->music_prob *tau;
       p1 =    tonal->music_prob *(1-tau) + (1-tonal->music_prob)*tau;
       /* We apply the current probability with exponent beta to work around
          the fact that the probability estimates aren't independent. */
       p0 *= (float)pow(1-frame_probs[0], beta);
       p1 *= (float)pow(frame_probs[0], beta);
       /* Normalise the probabilities to get the Markov probability of music. */
       tonal->music_prob = p1/(p0+p1);
       info->music_prob = tonal->music_prob;

       /* This chunk of code deals with delayed decision. */
       psum=1e-20f;
       /* Instantaneous probability of speech and music, with beta pre-applied. */
       speech0 = (float)pow(1-frame_probs[0], beta);
       music0  = (float)pow(frame_probs[0], beta);
       if (tonal->count==1)
       {
          tonal->pspeech[0]=.5;
          tonal->pmusic [0]=.5;
       }
       /* Updated probability of having only speech (s0) or only music (m0),
          before considering the new observation. */
       s0 = tonal->pspeech[0] + tonal->pspeech[1];
       m0 = tonal->pmusic [0] + tonal->pmusic [1];
       /* Updates s0 and m0 with instantaneous probability. */
       tonal->pspeech[0] = s0*(1-tau)*speech0;
       tonal->pmusic [0] = m0*(1-tau)*music0;
       /* Propagate the transition probabilities */
       for (i=1;i<DETECT_SIZE-1;i++)
       {
          tonal->pspeech[i] = tonal->pspeech[i+1]*speech0;
          tonal->pmusic [i] = tonal->pmusic [i+1]*music0;
       }
       /* Probability that the latest frame is speech, when all the previous ones were music. */
       tonal->pspeech[DETECT_SIZE-1] = m0*tau*speech0;
       /* Probability that the latest frame is music, when all the previous ones were speech. */
       tonal->pmusic [DETECT_SIZE-1] = s0*tau*music0;

       /* Renormalise probabilities to 1 */
       for (i=0;i<DETECT_SIZE;i++)
          psum += tonal->pspeech[i] + tonal->pmusic[i];
       psum = 1.f/psum;
       for (i=0;i<DETECT_SIZE;i++)
       {
          tonal->pspeech[i] *= psum;
          tonal->pmusic [i] *= psum;
       }
       psum = tonal->pmusic[0];
       for (i=1;i<DETECT_SIZE;i++)
          psum += tonal->pspeech[i];

       /* Estimate our confidence in the speech/music decisions */
       if (frame_probs[1]>.75)
       {
          if (tonal->music_prob>.9)
          {
             float adapt;
             adapt = 1.f/(++tonal->music_confidence_count);
             tonal->music_confidence_count = IMIN(tonal->music_confidence_count, 500);
             tonal->music_confidence += adapt*MAX16(-.2f,frame_probs[0]-tonal->music_confidence);
          }
          if (tonal->music_prob<.1)
          {
             float adapt;
             adapt = 1.f/(++tonal->speech_confidence_count);
             tonal->speech_confidence_count = IMIN(tonal->speech_confidence_count, 500);
             tonal->speech_confidence += adapt*MIN16(.2f,frame_probs[0]-tonal->speech_confidence);
          }
       } else {
          if (tonal->music_confidence_count==0)
             tonal->music_confidence = .9f;
          if (tonal->speech_confidence_count==0)
             tonal->speech_confidence = .1f;
       }
    }
    if (tonal->last_music != (tonal->music_prob>.5f))
       tonal->last_transition=0;
    tonal->last_music = tonal->music_prob>.5f;
#else
    info->music_prob = 0;
#endif
    /*for (i=0;i<25;i++)
       printf("%f ", features[i]);
    printf("\n");*/

    info->bandwidth = bandwidth;
    /*printf("%d %d\n", info->bandwidth, info->opus_bandwidth);*/
    info->noisiness = frame_noisiness;
    info->valid = 1;
    if (info_out!=NULL)
       OPUS_COPY(info_out, info, 1);
    RESTORE_STACK;
}
Example No. 5
void quant_coarse_energy(const CELTMode *m, int start, int end, int effEnd,
      const opus_val16 *eBands, opus_val16 *oldEBands, opus_uint32 budget,
      opus_val16 *error, ec_enc *enc, int C, int LM, int nbAvailableBytes,
      int force_intra, opus_val32 *delayedIntra, int two_pass, int loss_rate)
{
   int intra;
   opus_val16 max_decay;
   VARDECL(opus_val16, oldEBands_intra);
   VARDECL(opus_val16, error_intra);
   ec_enc enc_start_state;
   opus_uint32 tell;
   int badness1=0;
   opus_int32 intra_bias;
   opus_val32 new_distortion;
   SAVE_STACK;

   intra = force_intra || (!two_pass && *delayedIntra>2*C*(end-start) && nbAvailableBytes > (end-start)*C);
   intra_bias = (opus_int32)((budget**delayedIntra*loss_rate)/(C*512));
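   /* When the two passes below tie on badness, this bias lets the intra-coded version win
      even if it costs a few more bits; it grows with the expected loss rate and the
      accumulated inter-frame distortion (*delayedIntra). */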
   new_distortion = loss_distortion(eBands, oldEBands, start, effEnd, m->nbEBands, C);

   tell = ec_tell(enc);
   if (tell+3 > budget)
      two_pass = intra = 0;

   /* Encode the global flags using a simple probability model
      (first symbols in the stream) */

   max_decay = QCONST16(16.f,DB_SHIFT);
   if (end-start>10)
   {
#ifdef FIXED_POINT
      max_decay = MIN32(max_decay, SHL32(EXTEND32(nbAvailableBytes),DB_SHIFT-3));
#else
      max_decay = MIN32(max_decay, .125f*nbAvailableBytes);
#endif
   }
   enc_start_state = *enc;

   ALLOC(oldEBands_intra, C*m->nbEBands, opus_val16);
   ALLOC(error_intra, C*m->nbEBands, opus_val16);
   OPUS_COPY(oldEBands_intra, oldEBands, C*m->nbEBands);

   if (two_pass || intra)
   {
      badness1 = quant_coarse_energy_impl(m, start, end, eBands, oldEBands_intra, budget,
            tell, e_prob_model[LM][1], error_intra, enc, C, LM, 1, max_decay);
   }

   if (!intra)
   {
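      /* Second pass: the intra bits already written are saved, the encoder is rewound to
         its pre-intra state and the inter (predicted) version is coded; whichever of the
         two is better is kept. */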
      unsigned char *intra_buf;
      ec_enc enc_intra_state;
      opus_int32 tell_intra;
      opus_uint32 nstart_bytes;
      opus_uint32 nintra_bytes;
      int badness2;
      VARDECL(unsigned char, intra_bits);

      tell_intra = ec_tell_frac(enc);

      enc_intra_state = *enc;

      nstart_bytes = ec_range_bytes(&enc_start_state);
      nintra_bytes = ec_range_bytes(&enc_intra_state);
      intra_buf = ec_get_buffer(&enc_intra_state) + nstart_bytes;
      ALLOC(intra_bits, nintra_bytes-nstart_bytes, unsigned char);
      /* Copy bits from intra bit-stream */
      OPUS_COPY(intra_bits, intra_buf, nintra_bytes - nstart_bytes);

      *enc = enc_start_state;

      badness2 = quant_coarse_energy_impl(m, start, end, eBands, oldEBands, budget,
            tell, e_prob_model[LM][intra], error, enc, C, LM, 0, max_decay);

      if (two_pass && (badness1 < badness2 || (badness1 == badness2 && ((opus_int32)ec_tell_frac(enc))+intra_bias > tell_intra)))
      {
         *enc = enc_intra_state;
         /* Copy intra bits to bit-stream */
         OPUS_COPY(intra_buf, intra_bits, nintra_bytes - nstart_bytes);
         OPUS_COPY(oldEBands, oldEBands_intra, C*m->nbEBands);
         OPUS_COPY(error, error_intra, C*m->nbEBands);
         intra = 1;
      }
   } else {
Example No. 6
void surround_analysis(const CELTMode *celt_mode, const void *pcm, opus_val16 *bandLogE, opus_val32 *mem, opus_val32 *preemph_mem,
      int len, int overlap, int channels, int rate, opus_copy_channel_in_func copy_channel_in, int arch
)
{
   int c;
   int i;
   int LM;
   int pos[8] = {0};
   int upsample;
   int frame_size;
   int freq_size;
   opus_val16 channel_offset;
   opus_val32 bandE[21];
   opus_val16 maskLogE[3][21];
   VARDECL(opus_val32, in);
   VARDECL(opus_val16, x);
   VARDECL(opus_val32, freq);
   SAVE_STACK;

   upsample = resampling_factor(rate);
   frame_size = len*upsample;
   freq_size = IMIN(960, frame_size);
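   /* Long frames are analysed in chunks of at most 960 samples (20 ms at 48 kHz). */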

   /* LM = log2(frame_size / 120) */
   for (LM=0;LM<celt_mode->maxLM;LM++)
      if (celt_mode->shortMdctSize<<LM==frame_size)
         break;

   ALLOC(in, frame_size+overlap, opus_val32);
   ALLOC(x, len, opus_val16);
   ALLOC(freq, freq_size, opus_val32);

   channel_pos(channels, pos);

   for (c=0;c<3;c++)
      for (i=0;i<21;i++)
         maskLogE[c][i] = -QCONST16(28.f, DB_SHIFT);
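   /* maskLogE accumulates masking energy for the left (index 0) and right (index 2)
      positions; the centre mask (index 1) is derived later as their minimum. The initial
      value is far below any real signal energy. */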

   for (c=0;c<channels;c++)
   {
      int frame;
      int nb_frames = frame_size/freq_size;
      celt_assert(nb_frames*freq_size == frame_size);
      OPUS_COPY(in, mem+c*overlap, overlap);
      (*copy_channel_in)(x, 1, pcm, channels, c, len);
      celt_preemphasis(x, in+overlap, frame_size, 1, upsample, celt_mode->preemph, preemph_mem+c, 0);
#ifndef FIXED_POINT
      {
         opus_val32 sum;
         sum = celt_inner_prod(in, in, frame_size+overlap, 0);
         /* This should filter out both NaNs and ridiculous signals that could
            cause NaNs further down. */
         if (!(sum < 1e9f) || celt_isnan(sum))
         {
            OPUS_CLEAR(in, frame_size+overlap);
            preemph_mem[c] = 0;
         }
      }
#endif
      OPUS_CLEAR(bandE, 21);
      for (frame=0;frame<nb_frames;frame++)
      {
         opus_val32 tmpE[21];
         clt_mdct_forward(&celt_mode->mdct, in+960*frame, freq, celt_mode->window,
               overlap, celt_mode->maxLM-LM, 1, arch);
         if (upsample != 1)
         {
            int bound = freq_size/upsample;
            for (i=0;i<bound;i++)
               freq[i] *= upsample;
            for (;i<freq_size;i++)
               freq[i] = 0;
         }

         compute_band_energies(celt_mode, freq, tmpE, 21, 1, LM);
         /* If we have multiple frames, take the max energy. */
         for (i=0;i<21;i++)
            bandE[i] = MAX32(bandE[i], tmpE[i]);
      }
      amp2Log2(celt_mode, 21, 21, bandE, bandLogE+21*c, 1);
      /* Apply spreading function with -6 dB/band going up and -12 dB/band going down. */
      for (i=1;i<21;i++)
         bandLogE[21*c+i] = MAX16(bandLogE[21*c+i], bandLogE[21*c+i-1]-QCONST16(1.f, DB_SHIFT));
      for (i=19;i>=0;i--)
         bandLogE[21*c+i] = MAX16(bandLogE[21*c+i], bandLogE[21*c+i+1]-QCONST16(2.f, DB_SHIFT));
      if (pos[c]==1)
      {
         for (i=0;i<21;i++)
            maskLogE[0][i] = logSum(maskLogE[0][i], bandLogE[21*c+i]);
      } else if (pos[c]==3)
      {
         for (i=0;i<21;i++)
            maskLogE[2][i] = logSum(maskLogE[2][i], bandLogE[21*c+i]);
      } else if (pos[c]==2)
      {
         for (i=0;i<21;i++)
         {
            maskLogE[0][i] = logSum(maskLogE[0][i], bandLogE[21*c+i]-QCONST16(.5f, DB_SHIFT));
            maskLogE[2][i] = logSum(maskLogE[2][i], bandLogE[21*c+i]-QCONST16(.5f, DB_SHIFT));
         }
      }
#if 0
      for (i=0;i<21;i++)
         printf("%f ", bandLogE[21*c+i]);
      float sum=0;
      for (i=0;i<21;i++)
         sum += bandLogE[21*c+i];
      printf("%f ", sum/21);
#endif
      OPUS_COPY(mem+c*overlap, in+frame_size, overlap);
   }
   for (i=0;i<21;i++)
      maskLogE[1][i] = MIN32(maskLogE[0][i],maskLogE[2][i]);
   channel_offset = HALF16(celt_log2(QCONST32(2.f,14)/(channels-1)));
   for (c=0;c<3;c++)
      for (i=0;i<21;i++)
         maskLogE[c][i] += channel_offset;
#if 0
   for (c=0;c<3;c++)
   {
      for (i=0;i<21;i++)
         printf("%f ", maskLogE[c][i]);
   }
#endif
   for (c=0;c<channels;c++)
   {
      opus_val16 *mask;
      if (pos[c]!=0)
      {
         mask = &maskLogE[pos[c]-1][0];
         for (i=0;i<21;i++)
            bandLogE[21*c+i] = bandLogE[21*c+i] - mask[i];
      } else {
         for (i=0;i<21;i++)
            bandLogE[21*c+i] = 0;
      }
#if 0
      for (i=0;i<21;i++)
         printf("%f ", bandLogE[21*c+i]);
      printf("\n");
#endif
#if 0
      float sum=0;
      for (i=0;i<21;i++)
         sum += bandLogE[21*c+i];
      printf("%f ", sum/(float)QCONST32(21.f, DB_SHIFT));
      printf("\n");
#endif
   }
   RESTORE_STACK;
}
Example No. 7
static
#endif
void celt_synthesis(const CELTMode *mode, celt_norm *X, celt_sig * out_syn[],
                    opus_val16 *oldBandE, int start, int effEnd, int C, int CC,
                    int isTransient, int LM, int downsample,
                    int silence, int arch)
{
   int c, i;
   int M;
   int b;
   int B;
   int N, NB;
   int shift;
   int nbEBands;
   int overlap;
   VARDECL(celt_sig, freq);
   SAVE_STACK;

   overlap = mode->overlap;
   nbEBands = mode->nbEBands;
   N = mode->shortMdctSize<<LM;
   ALLOC(freq, N, celt_sig); /**< Interleaved signal MDCTs */
   M = 1<<LM;

   if (isTransient)
   {
      B = M;
      NB = mode->shortMdctSize;
      shift = mode->maxLM;
   } else {
      B = 1;
      NB = mode->shortMdctSize<<LM;
      shift = mode->maxLM-LM;
   }
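   /* For transient frames the spectrum is synthesised as B = 1<<LM short IMDCTs of NB
      samples each; otherwise a single long IMDCT covering the whole frame is used. */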

   if (CC==2&&C==1)
   {
      /* Copying a mono stream to two channels */
      celt_sig *freq2;
      denormalise_bands(mode, X, freq, oldBandE, start, effEnd, M,
            downsample, silence);
      /* Store a temporary copy in the output buffer because the IMDCT destroys its input. */
      freq2 = out_syn[1]+overlap/2;
      OPUS_COPY(freq2, freq, N);
      for (b=0;b<B;b++)
         clt_mdct_backward(&mode->mdct, &freq2[b], out_syn[0]+NB*b, mode->window, overlap, shift, B, arch);
      for (b=0;b<B;b++)
         clt_mdct_backward(&mode->mdct, &freq[b], out_syn[1]+NB*b, mode->window, overlap, shift, B, arch);
   } else if (CC==1&&C==2)
   {
      /* Downmixing a stereo stream to mono */
      celt_sig *freq2;
      freq2 = out_syn[0]+overlap/2;
      denormalise_bands(mode, X, freq, oldBandE, start, effEnd, M,
            downsample, silence);
      /* Use the output buffer as temp array before downmixing. */
      denormalise_bands(mode, X+N, freq2, oldBandE+nbEBands, start, effEnd, M,
            downsample, silence);
      for (i=0;i<N;i++)
         freq[i] = HALF32(ADD32(freq[i],freq2[i]));
      for (b=0;b<B;b++)
         clt_mdct_backward(&mode->mdct, &freq[b], out_syn[0]+NB*b, mode->window, overlap, shift, B, arch);
   } else {
      /* Normal case (mono or stereo) */
      c=0; do {
         denormalise_bands(mode, X+c*N, freq, oldBandE+c*nbEBands, start, effEnd, M,
               downsample, silence);
         for (b=0;b<B;b++)
            clt_mdct_backward(&mode->mdct, &freq[b], out_syn[c]+NB*b, mode->window, overlap, shift, B, arch);
      } while (++c<CC);
   }
   RESTORE_STACK;
}