int opus_packet_pad(unsigned char *data, opus_int32 len, opus_int32 new_len)
{
   OpusRepacketizer rp;
   opus_int32 ret;
   if (len < 1)
      return OPUS_BAD_ARG;
   if (len==new_len)
      return OPUS_OK;
   else if (len > new_len)
      return OPUS_BAD_ARG;
   opus_repacketizer_init(&rp);
   /* Moving payload to the end of the packet so we can do in-place padding */
   OPUS_MOVE(data+new_len-len, data, len);
   opus_repacketizer_cat(&rp, data+new_len-len, len);
   ret = opus_repacketizer_out_range_impl(&rp, 0, rp.nb_frames, data, new_len, 0, 1);
   if (ret > 0)
      return OPUS_OK;
   else
      return ret;
}
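/* Usage sketch (illustrative, not part of the library source): padding an
   encoded packet up to a constant transport size, e.g. to hide the payload
   length from an eavesdropper.  The buffer must already be large enough to
   hold wire_size bytes.  The function name send_fixed_size and the parameter
   wire_size are hypothetical; opus_packet_pad() and opus_strerror() are the
   real public API. */
#include <stdio.h>
#include <opus.h>

static int send_fixed_size(unsigned char *packet, opus_int32 len, opus_int32 wire_size)
{
   int err;
   if (len > wire_size)
      return OPUS_BAD_ARG;   /* padding can only grow a packet */
   err = opus_packet_pad(packet, len, wire_size);
   if (err != OPUS_OK)
   {
      fprintf(stderr, "opus_packet_pad failed: %s\n", opus_strerror(err));
      return err;
   }
   /* packet now holds exactly wire_size valid bytes */
   return OPUS_OK;
}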
opus_int32 opus_repacketizer_out_range_impl(OpusRepacketizer *rp, int begin, int end,
      unsigned char *data, opus_int32 maxlen, int self_delimited, int pad)
{
   int i, count;
   opus_int32 tot_size;
   opus_int16 *len;
   const unsigned char **frames;
   unsigned char * ptr;

   if (begin<0 || begin>=end || end>rp->nb_frames)
   {
      /*fprintf(stderr, "%d %d %d\n", begin, end, rp->nb_frames);*/
      return OPUS_BAD_ARG;
   }
   count = end-begin;

   len = rp->len+begin;
   frames = rp->frames+begin;
   if (self_delimited)
      tot_size = 1 + (len[count-1]>=252);
   else
      tot_size = 0;

   ptr = data;
   if (count==1)
   {
      /* Code 0 */
      tot_size += len[0]+1;
      if (tot_size > maxlen)
         return OPUS_BUFFER_TOO_SMALL;
      *ptr++ = rp->toc&0xFC;
   } else if (count==2)
   {
      if (len[1] == len[0])
      {
         /* Code 1 */
         tot_size += 2*len[0]+1;
         if (tot_size > maxlen)
            return OPUS_BUFFER_TOO_SMALL;
         *ptr++ = (rp->toc&0xFC) | 0x1;
      } else {
         /* Code 2 */
         tot_size += len[0]+len[1]+2+(len[0]>=252);
         if (tot_size > maxlen)
            return OPUS_BUFFER_TOO_SMALL;
         *ptr++ = (rp->toc&0xFC) | 0x2;
         ptr += encode_size(len[0], ptr);
      }
   }
   if (count > 2 || (pad && tot_size < maxlen))
   {
      /* Code 3 */
      int vbr;
      int pad_amount=0;

      /* Restart the process for the padding case */
      ptr = data;
      if (self_delimited)
         tot_size = 1 + (len[count-1]>=252);
      else
         tot_size = 0;
      vbr = 0;
      for (i=1;i<count;i++)
      {
         if (len[i] != len[0])
         {
            vbr=1;
            break;
         }
      }
      if (vbr)
      {
         tot_size += 2;
         for (i=0;i<count-1;i++)
            tot_size += 1 + (len[i]>=252) + len[i];
         tot_size += len[count-1];

         if (tot_size > maxlen)
            return OPUS_BUFFER_TOO_SMALL;
         *ptr++ = (rp->toc&0xFC) | 0x3;
         *ptr++ = count | 0x80;
      } else {
         tot_size += count*len[0]+2;
         if (tot_size > maxlen)
            return OPUS_BUFFER_TOO_SMALL;
         *ptr++ = (rp->toc&0xFC) | 0x3;
         *ptr++ = count;
      }
      pad_amount = pad ? (maxlen-tot_size) : 0;
      if (pad_amount != 0)
      {
         int nb_255s;
         data[1] |= 0x40;
         nb_255s = (pad_amount-1)/255;
         for (i=0;i<nb_255s;i++)
            *ptr++ = 255;
         *ptr++ = pad_amount-255*nb_255s-1;
         tot_size += pad_amount;
      }
      if (vbr)
      {
         for (i=0;i<count-1;i++)
            ptr += encode_size(len[i], ptr);
      }
   }
   if (self_delimited)
   {
      int sdlen = encode_size(len[count-1], ptr);
      ptr += sdlen;
   }
   /* Copy the actual data */
   for (i=0;i<count;i++)
   {
      /* Using OPUS_MOVE() instead of OPUS_COPY() in case we're doing in-place
         padding from opus_packet_pad or opus_packet_unpad(). */
      celt_assert(frames[i] + len[i] <= data || ptr <= frames[i]);
      OPUS_MOVE(ptr, frames[i], len[i]);
      ptr += len[i];
   }
   if (pad)
   {
      /* Fill padding with zeros. */
      while (ptr<data+maxlen)
         *ptr++=0;
   }
   return tot_size;
}
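/* Illustrative sketch (not library code) of how a decoder undoes the code 3
   padding-length field written above.  Per RFC 6716, each padding length byte
   equal to 255 contributes 254 bytes of padding and is followed by another
   length byte; any other value terminates the field.  This matches the writer
   above, which emits nb_255s bytes of 255 followed by pad_amount-255*nb_255s-1,
   then zero-fills exactly 254*nb_255s + (pad_amount-255*nb_255s-1) data bytes.
   parse_padding_length is a hypothetical helper; opus_int32 comes from
   opus_types.h. */
static opus_int32 parse_padding_length(const unsigned char *p, opus_int32 avail,
      opus_int32 *consumed)
{
   opus_int32 pad_data = 0;  /* number of padding payload bytes */
   opus_int32 used = 0;      /* number of length bytes read */
   int b;
   do {
      if (used >= avail)
         return -1;          /* truncated packet */
      b = p[used++];
      pad_data += (b==255) ? 254 : b;
   } while (b == 255);
   *consumed = used;
   return pad_data;
}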
void tonality_analysis(TonalityAnalysisState *tonal, AnalysisInfo *info_out,
      const CELTMode *celt_mode, const void *x, int len, int offset,
      int c1, int c2, int C, int lsb_depth, downmix_func downmix)
{
   int i, b;
   const kiss_fft_state *kfft;
   VARDECL(kiss_fft_cpx, in);
   VARDECL(kiss_fft_cpx, out);
   int N = 480, N2=240;
   float * OPUS_RESTRICT A = tonal->angle;
   float * OPUS_RESTRICT dA = tonal->d_angle;
   float * OPUS_RESTRICT d2A = tonal->d2_angle;
   VARDECL(float, tonality);
   VARDECL(float, noisiness);
   float band_tonality[NB_TBANDS];
   float logE[NB_TBANDS];
   float BFCC[8];
   float features[25];
   float frame_tonality;
   float max_frame_tonality;
   /*float tw_sum=0;*/
   float frame_noisiness;
   const float pi4 = (float)(M_PI*M_PI*M_PI*M_PI);
   float slope=0;
   float frame_stationarity;
   float relativeE;
   float frame_probs[2];
   float alpha, alphaE, alphaE2;
   float frame_loudness;
   float bandwidth_mask;
   int bandwidth=0;
   float maxE = 0;
   float noise_floor;
   int remaining;
   AnalysisInfo *info;
   SAVE_STACK;

   tonal->last_transition++;
   alpha = 1.f/IMIN(20, 1+tonal->count);
   alphaE = 1.f/IMIN(50, 1+tonal->count);
   alphaE2 = 1.f/IMIN(1000, 1+tonal->count);

   if (tonal->count<4)
      tonal->music_prob = .5;
   kfft = celt_mode->mdct.kfft[0];
   if (tonal->count==0)
      tonal->mem_fill = 240;
   downmix(x, &tonal->inmem[tonal->mem_fill], IMIN(len, ANALYSIS_BUF_SIZE-tonal->mem_fill), offset, c1, c2, C);
   if (tonal->mem_fill+len < ANALYSIS_BUF_SIZE)
   {
      tonal->mem_fill += len;
      /* Don't have enough to update the analysis */
      RESTORE_STACK;
      return;
   }
   info = &tonal->info[tonal->write_pos++];
   if (tonal->write_pos>=DETECT_SIZE)
      tonal->write_pos-=DETECT_SIZE;

   ALLOC(in, 480, kiss_fft_cpx);
   ALLOC(out, 480, kiss_fft_cpx);
   ALLOC(tonality, 240, float);
   ALLOC(noisiness, 240, float);
   for (i=0;i<N2;i++)
   {
      float w = analysis_window[i];
      in[i].r = (kiss_fft_scalar)(w*tonal->inmem[i]);
      in[i].i = (kiss_fft_scalar)(w*tonal->inmem[N2+i]);
      in[N-i-1].r = (kiss_fft_scalar)(w*tonal->inmem[N-i-1]);
      in[N-i-1].i = (kiss_fft_scalar)(w*tonal->inmem[N+N2-i-1]);
   }
   OPUS_MOVE(tonal->inmem, tonal->inmem+ANALYSIS_BUF_SIZE-240, 240);
   remaining = len - (ANALYSIS_BUF_SIZE-tonal->mem_fill);
   downmix(x, &tonal->inmem[240], remaining, offset+ANALYSIS_BUF_SIZE-tonal->mem_fill, c1, c2, C);
   tonal->mem_fill = 240 + remaining;
   opus_fft(kfft, in, out);

   for (i=1;i<N2;i++)
   {
      float X1r, X2r, X1i, X2i;
      float angle, d_angle, d2_angle;
      float angle2, d_angle2, d2_angle2;
      float mod1, mod2, avg_mod;
      X1r = (float)out[i].r+out[N-i].r;
      X1i = (float)out[i].i-out[N-i].i;
      X2r = (float)out[i].i+out[N-i].i;
      X2i = (float)out[N-i].r-out[i].r;

      angle = (float)(.5f/M_PI)*fast_atan2f(X1i, X1r);
      d_angle = angle - A[i];
      d2_angle = d_angle - dA[i];

      angle2 = (float)(.5f/M_PI)*fast_atan2f(X2i, X2r);
      d_angle2 = angle2 - angle;
      d2_angle2 = d_angle2 - d_angle;

      mod1 = d2_angle - (float)floor(.5+d2_angle);
      noisiness[i] = ABS16(mod1);
      mod1 *= mod1;
      mod1 *= mod1;

      mod2 = d2_angle2 - (float)floor(.5+d2_angle2);
      noisiness[i] += ABS16(mod2);
      mod2 *= mod2;
      mod2 *= mod2;

      avg_mod = .25f*(d2A[i]+2.f*mod1+mod2);
      tonality[i] = 1.f/(1.f+40.f*16.f*pi4*avg_mod)-.015f;

      A[i] = angle2;
      dA[i] = d_angle2;
      d2A[i] = mod2;
   }

   frame_tonality = 0;
   max_frame_tonality = 0;
   /*tw_sum = 0;*/
   info->activity = 0;
   frame_noisiness = 0;
   frame_stationarity = 0;
   if (!tonal->count)
   {
      for (b=0;b<NB_TBANDS;b++)
      {
         tonal->lowE[b] = 1e10;
         tonal->highE[b] = -1e10;
      }
   }
   relativeE = 0;
   frame_loudness = 0;
   for (b=0;b<NB_TBANDS;b++)
   {
      float E=0, tE=0, nE=0;
      float L1, L2;
      float stationarity;
      for (i=tbands[b];i<tbands[b+1];i++)
      {
         float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                    + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
#ifdef FIXED_POINT
         /* FIXME: It's probably best to change the BFCC filter initial state instead */
         binE *= 5.55e-17f;
#endif
         E += binE;
         tE += binE*tonality[i];
         nE += binE*2.f*(.5f-noisiness[i]);
      }
      tonal->E[tonal->E_count][b] = E;
      frame_noisiness += nE/(1e-15f+E);

      frame_loudness += (float)sqrt(E+1e-10f);
      logE[b] = (float)log(E+1e-10f);
      tonal->lowE[b] = MIN32(logE[b], tonal->lowE[b]+.01f);
      tonal->highE[b] = MAX32(logE[b], tonal->highE[b]-.1f);
      if (tonal->highE[b] < tonal->lowE[b]+1.f)
      {
         tonal->highE[b]+=.5f;
         tonal->lowE[b]-=.5f;
      }
      relativeE += (logE[b]-tonal->lowE[b])/(1e-15f+tonal->highE[b]-tonal->lowE[b]);

      L1=L2=0;
      for (i=0;i<NB_FRAMES;i++)
      {
         L1 += (float)sqrt(tonal->E[i][b]);
         L2 += tonal->E[i][b];
      }

      stationarity = MIN16(0.99f,L1/(float)sqrt(1e-15+NB_FRAMES*L2));
      stationarity *= stationarity;
      stationarity *= stationarity;
      frame_stationarity += stationarity;
      /*band_tonality[b] = tE/(1e-15+E)*/;
      band_tonality[b] = MAX16(tE/(1e-15f+E), stationarity*tonal->prev_band_tonality[b]);
#if 0
      if (b>=NB_TONAL_SKIP_BANDS)
      {
         frame_tonality += tweight[b]*band_tonality[b];
         tw_sum += tweight[b];
      }
#else
      frame_tonality += band_tonality[b];
      if (b>=NB_TBANDS-NB_TONAL_SKIP_BANDS)
         frame_tonality -= band_tonality[b-NB_TBANDS+NB_TONAL_SKIP_BANDS];
#endif
      max_frame_tonality = MAX16(max_frame_tonality, (1.f+.03f*(b-NB_TBANDS))*frame_tonality);
      slope += band_tonality[b]*(b-8);
      /*printf("%f %f ", band_tonality[b], stationarity);*/
      tonal->prev_band_tonality[b] = band_tonality[b];
   }

   bandwidth_mask = 0;
   bandwidth = 0;
   maxE = 0;
   noise_floor = 5.7e-4f/(1<<(IMAX(0,lsb_depth-8)));
#ifdef FIXED_POINT
   noise_floor *= 1<<(15+SIG_SHIFT);
#endif
   noise_floor *= noise_floor;
   for (b=0;b<NB_TOT_BANDS;b++)
   {
      float E=0;
      int band_start, band_end;
      /* Keep a margin of 300 Hz for aliasing */
      band_start = extra_bands[b];
      band_end = extra_bands[b+1];
      for (i=band_start;i<band_end;i++)
      {
         float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                    + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
         E += binE;
      }
      maxE = MAX32(maxE, E);
      tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E);
      E = MAX32(E, tonal->meanE[b]);
      /* Use a simple follower with 13 dB/Bark slope for spreading function */
      bandwidth_mask = MAX32(.05f*bandwidth_mask, E);
      /* Consider the band "active" only if all these conditions are met:
         1) less than 10 dB below the simple follower
         2) less than 90 dB below the peak band (maximal masking possible considering
            both the ATH and the loudness-dependent slope of the spreading function)
         3) above the PCM quantization noise floor
      */
      if (E>.1*bandwidth_mask && E*1e9f > maxE && E > noise_floor*(band_end-band_start))
         bandwidth = b;
   }
   if (tonal->count<=2)
      bandwidth = 20;
   frame_loudness = 20*(float)log10(frame_loudness);
   tonal->Etracker = MAX32(tonal->Etracker-.03f, frame_loudness);
   tonal->lowECount *= (1-alphaE);
   if (frame_loudness < tonal->Etracker-30)
      tonal->lowECount += alphaE;

   for (i=0;i<8;i++)
   {
      float sum=0;
      for (b=0;b<16;b++)
         sum += dct_table[i*16+b]*logE[b];
      BFCC[i] = sum;
   }

   frame_stationarity /= NB_TBANDS;
   relativeE /= NB_TBANDS;
   if (tonal->count<10)
      relativeE = .5;
   frame_noisiness /= NB_TBANDS;
#if 1
   info->activity = frame_noisiness + (1-frame_noisiness)*relativeE;
#else
   info->activity = .5*(1+frame_noisiness-frame_stationarity);
#endif
   frame_tonality = (max_frame_tonality/(NB_TBANDS-NB_TONAL_SKIP_BANDS));
   frame_tonality = MAX16(frame_tonality, tonal->prev_tonality*.8f);
   tonal->prev_tonality = frame_tonality;

   slope /= 8*8;
   info->tonality_slope = slope;

   tonal->E_count = (tonal->E_count+1)%NB_FRAMES;
   tonal->count++;
   info->tonality = frame_tonality;

   for (i=0;i<4;i++)
      features[i] = -0.12299f*(BFCC[i]+tonal->mem[i+24]) + 0.49195f*(tonal->mem[i]+tonal->mem[i+16]) + 0.69693f*tonal->mem[i+8] - 1.4349f*tonal->cmean[i];

   for (i=0;i<4;i++)
      tonal->cmean[i] = (1-alpha)*tonal->cmean[i] + alpha*BFCC[i];

   for (i=0;i<4;i++)
      features[4+i] = 0.63246f*(BFCC[i]-tonal->mem[i+24]) + 0.31623f*(tonal->mem[i]-tonal->mem[i+16]);
   for (i=0;i<3;i++)
      features[8+i] = 0.53452f*(BFCC[i]+tonal->mem[i+24]) - 0.26726f*(tonal->mem[i]+tonal->mem[i+16]) -0.53452f*tonal->mem[i+8];

   if (tonal->count > 5)
   {
      for (i=0;i<9;i++)
         tonal->std[i] = (1-alpha)*tonal->std[i] + alpha*features[i]*features[i];
   }

   for (i=0;i<8;i++)
   {
      tonal->mem[i+24] = tonal->mem[i+16];
      tonal->mem[i+16] = tonal->mem[i+8];
      tonal->mem[i+8] = tonal->mem[i];
      tonal->mem[i] = BFCC[i];
   }
   for (i=0;i<9;i++)
      features[11+i] = (float)sqrt(tonal->std[i]);
   features[20] = info->tonality;
   features[21] = info->activity;
   features[22] = frame_stationarity;
   features[23] = info->tonality_slope;
   features[24] = tonal->lowECount;

#ifndef DISABLE_FLOAT_API
   mlp_process(&net, features, frame_probs);
   frame_probs[0] = .5f*(frame_probs[0]+1);
   /* Curve fitting between the MLP probability and the actual probability */
   frame_probs[0] = .01f + 1.21f*frame_probs[0]*frame_probs[0] - .23f*(float)pow(frame_probs[0], 10);
   /* Probability of active audio (as opposed to silence) */
   frame_probs[1] = .5f*frame_probs[1]+.5f;
   /* Consider that silence has a 50-50 probability. */
   frame_probs[0] = frame_probs[1]*frame_probs[0] + (1-frame_probs[1])*.5f;

   /*printf("%f %f ", frame_probs[0], frame_probs[1]);*/
   {
      /* Probability of state transition */
      float tau;
      /* Represents independence of the MLP probabilities, where beta=1 means fully independent. */
      float beta;
      /* Denormalized probability of speech (p0) and music (p1) after update */
      float p0, p1;
      /* Probabilities for "all speech" and "all music" */
      float s0, m0;
      /* Probability sum for renormalisation */
      float psum;
      /* Instantaneous probability of speech and music, with beta pre-applied. */
      float speech0;
      float music0;

      /* One transition every 3 minutes of active audio */
      tau = .00005f*frame_probs[1];
      beta = .05f;
      if (1) {
         /* Adapt beta based on how "unexpected" the new prob is */
         float p, q;
         p = MAX16(.05f,MIN16(.95f,frame_probs[0]));
         q = MAX16(.05f,MIN16(.95f,tonal->music_prob));
         beta = .01f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p));
      }
      /* p0 and p1 are the probabilities of speech and music at this frame using
         only information from previous frame and applying the state transition model */
      p0 = (1-tonal->music_prob)*(1-tau) + tonal->music_prob *tau;
      p1 = tonal->music_prob *(1-tau) + (1-tonal->music_prob)*tau;
      /* We apply the current probability with exponent beta to work around
         the fact that the probability estimates aren't independent. */
      p0 *= (float)pow(1-frame_probs[0], beta);
      p1 *= (float)pow(frame_probs[0], beta);
      /* Normalise the probabilities to get the Markov probability of music. */
      tonal->music_prob = p1/(p0+p1);
      info->music_prob = tonal->music_prob;

      /* This chunk of code deals with delayed decision. */
      psum=1e-20f;
      /* Instantaneous probability of speech and music, with beta pre-applied. */
      speech0 = (float)pow(1-frame_probs[0], beta);
      music0 = (float)pow(frame_probs[0], beta);
      if (tonal->count==1)
      {
         tonal->pspeech[0]=.5;
         tonal->pmusic [0]=.5;
      }
      /* Updated probability of having only speech (s0) or only music (m0),
         before considering the new observation. */
      s0 = tonal->pspeech[0] + tonal->pspeech[1];
      m0 = tonal->pmusic [0] + tonal->pmusic [1];
      /* Updates s0 and m0 with instantaneous probability. */
      tonal->pspeech[0] = s0*(1-tau)*speech0;
      tonal->pmusic [0] = m0*(1-tau)*music0;
      /* Propagate the transition probabilities */
      for (i=1;i<DETECT_SIZE-1;i++)
      {
         tonal->pspeech[i] = tonal->pspeech[i+1]*speech0;
         tonal->pmusic [i] = tonal->pmusic [i+1]*music0;
      }
      /* Probability that the latest frame is speech, when all the previous ones were music. */
      tonal->pspeech[DETECT_SIZE-1] = m0*tau*speech0;
      /* Probability that the latest frame is music, when all the previous ones were speech. */
      tonal->pmusic [DETECT_SIZE-1] = s0*tau*music0;

      /* Renormalise probabilities to 1 */
      for (i=0;i<DETECT_SIZE;i++)
         psum += tonal->pspeech[i] + tonal->pmusic[i];
      psum = 1.f/psum;
      for (i=0;i<DETECT_SIZE;i++)
      {
         tonal->pspeech[i] *= psum;
         tonal->pmusic [i] *= psum;
      }
      psum = tonal->pmusic[0];
      for (i=1;i<DETECT_SIZE;i++)
         psum += tonal->pspeech[i];

      /* Estimate our confidence in the speech/music decisions */
      if (frame_probs[1]>.75)
      {
         if (tonal->music_prob>.9)
         {
            float adapt;
            adapt = 1.f/(++tonal->music_confidence_count);
            tonal->music_confidence_count = IMIN(tonal->music_confidence_count, 500);
            tonal->music_confidence += adapt*MAX16(-.2f,frame_probs[0]-tonal->music_confidence);
         }
         if (tonal->music_prob<.1)
         {
            float adapt;
            adapt = 1.f/(++tonal->speech_confidence_count);
            tonal->speech_confidence_count = IMIN(tonal->speech_confidence_count, 500);
            tonal->speech_confidence += adapt*MIN16(.2f,frame_probs[0]-tonal->speech_confidence);
         }
      } else {
         if (tonal->music_confidence_count==0)
            tonal->music_confidence = .9f;
         if (tonal->speech_confidence_count==0)
            tonal->speech_confidence = .1f;
      }
   }
   if (tonal->last_music != (tonal->music_prob>.5f))
      tonal->last_transition=0;
   tonal->last_music = tonal->music_prob>.5f;
#else
   info->music_prob = 0;
#endif
   /*for (i=0;i<25;i++)
      printf("%f ", features[i]);
   printf("\n");*/

   info->bandwidth = bandwidth;
   /*printf("%d %d\n", info->bandwidth, info->opus_bandwidth);*/
   info->noisiness = frame_noisiness;
   info->valid = 1;
   if (info_out!=NULL)
      OPUS_COPY(info_out, info, 1);
   RESTORE_STACK;
}
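/* Minimal standalone sketch (illustrative, not library code) of the two-state
   Markov smoothing used above for music_prob: the previous frame's estimate is
   mixed through a transition probability tau, then multiplied by the
   instantaneous MLP estimate raised to exponent beta, which discounts the fact
   that per-frame estimates are not independent.  The adaptation of beta from
   how "unexpected" the new probability is (see above) is left to the caller;
   the helper name update_music_prob is hypothetical. */
#include <math.h>

static float update_music_prob(float music_prob, float frame_prob,
                               float tau, float beta)
{
   /* Prior after one transition step of the two-state Markov chain */
   float p0 = (1-music_prob)*(1-tau) + music_prob*tau;   /* speech */
   float p1 = music_prob*(1-tau) + (1-music_prob)*tau;   /* music  */
   /* Soften the (correlated) observation with exponent beta */
   p0 *= (float)pow(1-frame_prob, beta);
   p1 *= (float)pow(frame_prob, beta);
   /* Renormalise to get the posterior probability of music */
   return p1/(p0+p1);
}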
static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt_mode,
      const void *x, int len, int offset, int c1, int c2, int C,
      int lsb_depth, downmix_func downmix)
{
   int i, b;
   const kiss_fft_state *kfft;
   VARDECL(kiss_fft_cpx, in);
   VARDECL(kiss_fft_cpx, out);
   int N = 480, N2=240;
   float * OPUS_RESTRICT A = tonal->angle;
   float * OPUS_RESTRICT dA = tonal->d_angle;
   float * OPUS_RESTRICT d2A = tonal->d2_angle;
   VARDECL(float, tonality);
   VARDECL(float, noisiness);
   float band_tonality[NB_TBANDS];
   float logE[NB_TBANDS];
   float BFCC[8];
   float features[25];
   float frame_tonality;
   float max_frame_tonality;
   /*float tw_sum=0;*/
   float frame_noisiness;
   const float pi4 = (float)(M_PI*M_PI*M_PI*M_PI);
   float slope=0;
   float frame_stationarity;
   float relativeE;
   float frame_probs[2];
   float alpha, alphaE, alphaE2;
   float frame_loudness;
   float bandwidth_mask;
   int bandwidth=0;
   float maxE = 0;
   float noise_floor;
   int remaining;
   AnalysisInfo *info;
   float hp_ener;
   float tonality2[240];
   float midE[8];
   float spec_variability=0;
   float band_log2[NB_TBANDS+1];
   float leakage_from[NB_TBANDS+1];
   float leakage_to[NB_TBANDS+1];
   SAVE_STACK;

   alpha = 1.f/IMIN(10, 1+tonal->count);
   alphaE = 1.f/IMIN(25, 1+tonal->count);
   alphaE2 = 1.f/IMIN(500, 1+tonal->count);

   if (tonal->Fs == 48000)
   {
      /* len and offset are now at 24 kHz. */
      len/= 2;
      offset /= 2;
   } else if (tonal->Fs == 16000) {
      len = 3*len/2;
      offset = 3*offset/2;
   }

   if (tonal->count<4)
   {
      if (tonal->application == OPUS_APPLICATION_VOIP)
         tonal->music_prob = .1f;
      else
         tonal->music_prob = .625f;
   }
   kfft = celt_mode->mdct.kfft[0];
   if (tonal->count==0)
      tonal->mem_fill = 240;
   tonal->hp_ener_accum += (float)downmix_and_resample(downmix, x,
         &tonal->inmem[tonal->mem_fill], tonal->downmix_state,
         IMIN(len, ANALYSIS_BUF_SIZE-tonal->mem_fill), offset, c1, c2, C, tonal->Fs);
   if (tonal->mem_fill+len < ANALYSIS_BUF_SIZE)
   {
      tonal->mem_fill += len;
      /* Don't have enough to update the analysis */
      RESTORE_STACK;
      return;
   }
   hp_ener = tonal->hp_ener_accum;
   info = &tonal->info[tonal->write_pos++];
   if (tonal->write_pos>=DETECT_SIZE)
      tonal->write_pos-=DETECT_SIZE;

   ALLOC(in, 480, kiss_fft_cpx);
   ALLOC(out, 480, kiss_fft_cpx);
   ALLOC(tonality, 240, float);
   ALLOC(noisiness, 240, float);
   for (i=0;i<N2;i++)
   {
      float w = analysis_window[i];
      in[i].r = (kiss_fft_scalar)(w*tonal->inmem[i]);
      in[i].i = (kiss_fft_scalar)(w*tonal->inmem[N2+i]);
      in[N-i-1].r = (kiss_fft_scalar)(w*tonal->inmem[N-i-1]);
      in[N-i-1].i = (kiss_fft_scalar)(w*tonal->inmem[N+N2-i-1]);
   }
   OPUS_MOVE(tonal->inmem, tonal->inmem+ANALYSIS_BUF_SIZE-240, 240);
   remaining = len - (ANALYSIS_BUF_SIZE-tonal->mem_fill);
   tonal->hp_ener_accum = (float)downmix_and_resample(downmix, x,
         &tonal->inmem[240], tonal->downmix_state, remaining,
         offset+ANALYSIS_BUF_SIZE-tonal->mem_fill, c1, c2, C, tonal->Fs);
   tonal->mem_fill = 240 + remaining;
   opus_fft(kfft, in, out, tonal->arch);
#ifndef FIXED_POINT
   /* If there's any NaN on the input, the entire output will be NaN,
      so we only need to check one value. */
   if (celt_isnan(out[0].r))
   {
      info->valid = 0;
      RESTORE_STACK;
      return;
   }
#endif

   for (i=1;i<N2;i++)
   {
      float X1r, X2r, X1i, X2i;
      float angle, d_angle, d2_angle;
      float angle2, d_angle2, d2_angle2;
      float mod1, mod2, avg_mod;
      X1r = (float)out[i].r+out[N-i].r;
      X1i = (float)out[i].i-out[N-i].i;
      X2r = (float)out[i].i+out[N-i].i;
      X2i = (float)out[N-i].r-out[i].r;

      angle = (float)(.5f/M_PI)*fast_atan2f(X1i, X1r);
      d_angle = angle - A[i];
      d2_angle = d_angle - dA[i];

      angle2 = (float)(.5f/M_PI)*fast_atan2f(X2i, X2r);
      d_angle2 = angle2 - angle;
      d2_angle2 = d_angle2 - d_angle;

      mod1 = d2_angle - (float)float2int(d2_angle);
      noisiness[i] = ABS16(mod1);
      mod1 *= mod1;
      mod1 *= mod1;

      mod2 = d2_angle2 - (float)float2int(d2_angle2);
      noisiness[i] += ABS16(mod2);
      mod2 *= mod2;
      mod2 *= mod2;

      avg_mod = .25f*(d2A[i]+mod1+2*mod2);
      /* This introduces an extra delay of 2 frames in the detection. */
      tonality[i] = 1.f/(1.f+40.f*16.f*pi4*avg_mod)-.015f;
      /* No delay on this detection, but it's less reliable. */
      tonality2[i] = 1.f/(1.f+40.f*16.f*pi4*mod2)-.015f;

      A[i] = angle2;
      dA[i] = d_angle2;
      d2A[i] = mod2;
   }
   for (i=2;i<N2-1;i++)
   {
      float tt = MIN32(tonality2[i], MAX32(tonality2[i-1], tonality2[i+1]));
      tonality[i] = .9f*MAX32(tonality[i], tt-.1f);
   }
   frame_tonality = 0;
   max_frame_tonality = 0;
   /*tw_sum = 0;*/
   info->activity = 0;
   frame_noisiness = 0;
   frame_stationarity = 0;
   if (!tonal->count)
   {
      for (b=0;b<NB_TBANDS;b++)
      {
         tonal->lowE[b] = 1e10;
         tonal->highE[b] = -1e10;
      }
   }
   relativeE = 0;
   frame_loudness = 0;
   /* The energy of the very first band is special because of DC. */
   {
      float E = 0;
      float X1r, X2r;
      X1r = 2*(float)out[0].r;
      X2r = 2*(float)out[0].i;
      E = X1r*X1r + X2r*X2r;
      for (i=1;i<4;i++)
      {
         float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                    + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
         E += binE;
      }
      E = SCALE_ENER(E);
      band_log2[0] = .5f*1.442695f*(float)log(E+1e-10f);
   }
   for (b=0;b<NB_TBANDS;b++)
   {
      float E=0, tE=0, nE=0;
      float L1, L2;
      float stationarity;
      for (i=tbands[b];i<tbands[b+1];i++)
      {
         float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                    + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
         binE = SCALE_ENER(binE);
         E += binE;
         tE += binE*MAX32(0, tonality[i]);
         nE += binE*2.f*(.5f-noisiness[i]);
      }
#ifndef FIXED_POINT
      /* Check for extreme band energies that could cause NaNs later. */
      if (!(E<1e9f) || celt_isnan(E))
      {
         info->valid = 0;
         RESTORE_STACK;
         return;
      }
#endif

      tonal->E[tonal->E_count][b] = E;
      frame_noisiness += nE/(1e-15f+E);

      frame_loudness += (float)sqrt(E+1e-10f);
      logE[b] = (float)log(E+1e-10f);
      band_log2[b+1] = .5f*1.442695f*(float)log(E+1e-10f);
      tonal->logE[tonal->E_count][b] = logE[b];
      if (tonal->count==0)
         tonal->highE[b] = tonal->lowE[b] = logE[b];
      if (tonal->highE[b] > tonal->lowE[b] + 7.5)
      {
         if (tonal->highE[b] - logE[b] > logE[b] - tonal->lowE[b])
            tonal->highE[b] -= .01f;
         else
            tonal->lowE[b] += .01f;
      }
      if (logE[b] > tonal->highE[b])
      {
         tonal->highE[b] = logE[b];
         tonal->lowE[b] = MAX32(tonal->highE[b]-15, tonal->lowE[b]);
      } else if (logE[b] < tonal->lowE[b])
      {
         tonal->lowE[b] = logE[b];
         tonal->highE[b] = MIN32(tonal->lowE[b]+15, tonal->highE[b]);
      }
      relativeE += (logE[b]-tonal->lowE[b])/(1e-15f + (tonal->highE[b]-tonal->lowE[b]));

      L1=L2=0;
      for (i=0;i<NB_FRAMES;i++)
      {
         L1 += (float)sqrt(tonal->E[i][b]);
         L2 += tonal->E[i][b];
      }

      stationarity = MIN16(0.99f,L1/(float)sqrt(1e-15+NB_FRAMES*L2));
      stationarity *= stationarity;
      stationarity *= stationarity;
      frame_stationarity += stationarity;
      /*band_tonality[b] = tE/(1e-15+E)*/;
      band_tonality[b] = MAX16(tE/(1e-15f+E), stationarity*tonal->prev_band_tonality[b]);
#if 0
      if (b>=NB_TONAL_SKIP_BANDS)
      {
         frame_tonality += tweight[b]*band_tonality[b];
         tw_sum += tweight[b];
      }
#else
      frame_tonality += band_tonality[b];
      if (b>=NB_TBANDS-NB_TONAL_SKIP_BANDS)
         frame_tonality -= band_tonality[b-NB_TBANDS+NB_TONAL_SKIP_BANDS];
#endif
      max_frame_tonality = MAX16(max_frame_tonality, (1.f+.03f*(b-NB_TBANDS))*frame_tonality);
      slope += band_tonality[b]*(b-8);
      /*printf("%f %f ", band_tonality[b], stationarity);*/
      tonal->prev_band_tonality[b] = band_tonality[b];
   }

   leakage_from[0] = band_log2[0];
   leakage_to[0] = band_log2[0] - LEAKAGE_OFFSET;
   for (b=1;b<NB_TBANDS+1;b++)
   {
      float leak_slope = LEAKAGE_SLOPE*(tbands[b]-tbands[b-1])/4;
      leakage_from[b] = MIN16(leakage_from[b-1]+leak_slope, band_log2[b]);
      leakage_to[b] = MAX16(leakage_to[b-1]-leak_slope, band_log2[b]-LEAKAGE_OFFSET);
   }
   for (b=NB_TBANDS-2;b>=0;b--)
   {
      float leak_slope = LEAKAGE_SLOPE*(tbands[b+1]-tbands[b])/4;
      leakage_from[b] = MIN16(leakage_from[b+1]+leak_slope, leakage_from[b]);
      leakage_to[b] = MAX16(leakage_to[b+1]-leak_slope, leakage_to[b]);
   }
   celt_assert(NB_TBANDS+1 <= LEAK_BANDS);
   for (b=0;b<NB_TBANDS+1;b++)
   {
      /* leak_boost[] is made up of two terms. The first, based on leakage_to[],
         represents the boost needed to overcome the amount of analysis leakage
         caused in a weaker band b by louder neighbouring bands.
         The second, based on leakage_from[], applies to a loud band b for which
         the quantization noise causes synthesis leakage to the weaker
         neighbouring bands. */
      float boost = MAX16(0, leakage_to[b] - band_log2[b]) +
            MAX16(0, band_log2[b] - (leakage_from[b]+LEAKAGE_OFFSET));
      info->leak_boost[b] = IMIN(255, (int)floor(.5 + 64.f*boost));
   }
   for (;b<LEAK_BANDS;b++)
      info->leak_boost[b] = 0;

   for (i=0;i<NB_FRAMES;i++)
   {
      int j;
      float mindist = 1e15f;
      for (j=0;j<NB_FRAMES;j++)
      {
         int k;
         float dist=0;
         for (k=0;k<NB_TBANDS;k++)
         {
            float tmp;
            tmp = tonal->logE[i][k] - tonal->logE[j][k];
            dist += tmp*tmp;
         }
         if (j!=i)
            mindist = MIN32(mindist, dist);
      }
      spec_variability += mindist;
   }
   spec_variability = (float)sqrt(spec_variability/NB_FRAMES/NB_TBANDS);
   bandwidth_mask = 0;
   bandwidth = 0;
   maxE = 0;
   noise_floor = 5.7e-4f/(1<<(IMAX(0,lsb_depth-8)));
   noise_floor *= noise_floor;
   for (b=0;b<NB_TBANDS;b++)
   {
      float E=0;
      int band_start, band_end;
      /* Keep a margin of 300 Hz for aliasing */
      band_start = tbands[b];
      band_end = tbands[b+1];
      for (i=band_start;i<band_end;i++)
      {
         float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                    + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
         E += binE;
      }
      E = SCALE_ENER(E);
      maxE = MAX32(maxE, E);
      tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E);
      E = MAX32(E, tonal->meanE[b]);
      /* Use a simple follower with 13 dB/Bark slope for spreading function */
      bandwidth_mask = MAX32(.05f*bandwidth_mask, E);
      /* Consider the band "active" only if all these conditions are met:
         1) less than 10 dB below the simple follower
         2) less than 90 dB below the peak band (maximal masking possible considering
            both the ATH and the loudness-dependent slope of the spreading function)
         3) above the PCM quantization noise floor
         We use b+1 because the first CELT band isn't included in tbands[]
      */
      if (E>.1*bandwidth_mask && E*1e9f > maxE && E > noise_floor*(band_end-band_start))
         bandwidth = b+1;
   }
   /* Special case for the last two bands, for which we don't have spectrum but only
      the energy above 12 kHz. */
   if (tonal->Fs == 48000)
   {
      float ratio;
      float E = hp_ener*(1.f/(240*240));
      ratio = tonal->prev_bandwidth==20 ? 0.03f : 0.07f;
#ifdef FIXED_POINT
      /* silk_resampler_down2_hp() shifted right by an extra 8 bits. */
      E *= 256.f*(1.f/Q15ONE)*(1.f/Q15ONE);
#endif
      maxE = MAX32(maxE, E);
      tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E);
      E = MAX32(E, tonal->meanE[b]);
      /* Use a simple follower with 13 dB/Bark slope for spreading function */
      bandwidth_mask = MAX32(.05f*bandwidth_mask, E);
      if (E>ratio*bandwidth_mask && E*1e9f > maxE && E > noise_floor*160)
         bandwidth = 20;
      /* This detector is unreliable, so if the bandwidth is close to SWB, assume it's FB. */
      if (bandwidth >= 17)
         bandwidth = 20;
   }
   if (tonal->count<=2)
      bandwidth = 20;
   frame_loudness = 20*(float)log10(frame_loudness);
   tonal->Etracker = MAX32(tonal->Etracker-.003f, frame_loudness);
   tonal->lowECount *= (1-alphaE);
   if (frame_loudness < tonal->Etracker-30)
      tonal->lowECount += alphaE;

   for (i=0;i<8;i++)
   {
      float sum=0;
      for (b=0;b<16;b++)
         sum += dct_table[i*16+b]*logE[b];
      BFCC[i] = sum;
   }
   for (i=0;i<8;i++)
   {
      float sum=0;
      for (b=0;b<16;b++)
         sum += dct_table[i*16+b]*.5f*(tonal->highE[b]+tonal->lowE[b]);
      midE[i] = sum;
   }

   frame_stationarity /= NB_TBANDS;
   relativeE /= NB_TBANDS;
   if (tonal->count<10)
      relativeE = .5f;
   frame_noisiness /= NB_TBANDS;
#if 1
   info->activity = frame_noisiness + (1-frame_noisiness)*relativeE;
#else
   info->activity = .5*(1+frame_noisiness-frame_stationarity);
#endif
   frame_tonality = (max_frame_tonality/(NB_TBANDS-NB_TONAL_SKIP_BANDS));
   frame_tonality = MAX16(frame_tonality, tonal->prev_tonality*.8f);
   tonal->prev_tonality = frame_tonality;

   slope /= 8*8;
   info->tonality_slope = slope;

   tonal->E_count = (tonal->E_count+1)%NB_FRAMES;
   tonal->count = IMIN(tonal->count+1, ANALYSIS_COUNT_MAX);
   info->tonality = frame_tonality;

   for (i=0;i<4;i++)
      features[i] = -0.12299f*(BFCC[i]+tonal->mem[i+24]) + 0.49195f*(tonal->mem[i]+tonal->mem[i+16]) + 0.69693f*tonal->mem[i+8] - 1.4349f*tonal->cmean[i];

   for (i=0;i<4;i++)
      tonal->cmean[i] = (1-alpha)*tonal->cmean[i] + alpha*BFCC[i];

   for (i=0;i<4;i++)
      features[4+i] = 0.63246f*(BFCC[i]-tonal->mem[i+24]) + 0.31623f*(tonal->mem[i]-tonal->mem[i+16]);
   for (i=0;i<3;i++)
      features[8+i] = 0.53452f*(BFCC[i]+tonal->mem[i+24]) - 0.26726f*(tonal->mem[i]+tonal->mem[i+16]) -0.53452f*tonal->mem[i+8];

   if (tonal->count > 5)
   {
      for (i=0;i<9;i++)
         tonal->std[i] = (1-alpha)*tonal->std[i] + alpha*features[i]*features[i];
   }
   for (i=0;i<4;i++)
      features[i] = BFCC[i]-midE[i];

   for (i=0;i<8;i++)
   {
      tonal->mem[i+24] = tonal->mem[i+16];
      tonal->mem[i+16] = tonal->mem[i+8];
      tonal->mem[i+8] = tonal->mem[i];
      tonal->mem[i] = BFCC[i];
   }
   for (i=0;i<9;i++)
      features[11+i] = (float)sqrt(tonal->std[i]) - std_feature_bias[i];
   features[18] = spec_variability - 0.78f;
   features[20] = info->tonality - 0.154723f;
   features[21] = info->activity - 0.724643f;
   features[22] = frame_stationarity - 0.743717f;
   features[23] = info->tonality_slope + 0.069216f;
   features[24] = tonal->lowECount - 0.067930f;

   mlp_process(&net, features, frame_probs);
   frame_probs[0] = .5f*(frame_probs[0]+1);
   /* Curve fitting between the MLP probability and the actual probability */
   /*frame_probs[0] = .01f + 1.21f*frame_probs[0]*frame_probs[0] - .23f*(float)pow(frame_probs[0], 10);*/
   /* Probability of active audio (as opposed to silence) */
   frame_probs[1] = .5f*frame_probs[1]+.5f;
   frame_probs[1] *= frame_probs[1];
   /* Probability of speech or music vs noise */
   info->activity_probability = frame_probs[1];

   /*printf("%f %f\n", frame_probs[0], frame_probs[1]);*/
   {
      /* Probability of state transition */
      float tau;
      /* Represents independence of the MLP probabilities, where beta=1 means fully independent. */
      float beta;
      /* Denormalized probability of speech (p0) and music (p1) after update */
      float p0, p1;
      /* Probabilities for "all speech" and "all music" */
      float s0, m0;
      /* Probability sum for renormalisation */
      float psum;
      /* Instantaneous probability of speech and music, with beta pre-applied. */
      float speech0;
      float music0;
      float p, q;

      /* More silence transitions for speech than for music. */
      tau = .001f*tonal->music_prob + .01f*(1-tonal->music_prob);
      p = MAX16(.05f,MIN16(.95f,frame_probs[1]));
      q = MAX16(.05f,MIN16(.95f,tonal->vad_prob));
      beta = .02f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p));
      /* p0 and p1 are the probabilities of speech and music at this frame using
         only information from previous frame and applying the state transition model */
      p0 = (1-tonal->vad_prob)*(1-tau) + tonal->vad_prob *tau;
      p1 = tonal->vad_prob *(1-tau) + (1-tonal->vad_prob)*tau;
      /* We apply the current probability with exponent beta to work around
         the fact that the probability estimates aren't independent. */
      p0 *= (float)pow(1-frame_probs[1], beta);
      p1 *= (float)pow(frame_probs[1], beta);
      /* Normalise the probabilities to get the Markov probability of music. */
      tonal->vad_prob = p1/(p0+p1);
      info->vad_prob = tonal->vad_prob;
      /* Consider that silence has a 50-50 probability of being speech or music. */
      frame_probs[0] = tonal->vad_prob*frame_probs[0] + (1-tonal->vad_prob)*.5f;

      /* One transition every 3 minutes of active audio */
      tau = .0001f;
      /* Adapt beta based on how "unexpected" the new prob is */
      p = MAX16(.05f,MIN16(.95f,frame_probs[0]));
      q = MAX16(.05f,MIN16(.95f,tonal->music_prob));
      beta = .02f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p));
      /* p0 and p1 are the probabilities of speech and music at this frame using
         only information from previous frame and applying the state transition model */
      p0 = (1-tonal->music_prob)*(1-tau) + tonal->music_prob *tau;
      p1 = tonal->music_prob *(1-tau) + (1-tonal->music_prob)*tau;
      /* We apply the current probability with exponent beta to work around
         the fact that the probability estimates aren't independent. */
      p0 *= (float)pow(1-frame_probs[0], beta);
      p1 *= (float)pow(frame_probs[0], beta);
      /* Normalise the probabilities to get the Markov probability of music. */
      tonal->music_prob = p1/(p0+p1);
      info->music_prob = tonal->music_prob;

      /*printf("%f %f %f %f\n", frame_probs[0], frame_probs[1], tonal->music_prob, tonal->vad_prob);*/
      /* This chunk of code deals with delayed decision. */
      psum=1e-20f;
      /* Instantaneous probability of speech and music, with beta pre-applied. */
      speech0 = (float)pow(1-frame_probs[0], beta);
      music0 = (float)pow(frame_probs[0], beta);
      if (tonal->count==1)
      {
         if (tonal->application == OPUS_APPLICATION_VOIP)
            tonal->pmusic[0] = .1f;
         else
            tonal->pmusic[0] = .625f;
         tonal->pspeech[0] = 1-tonal->pmusic[0];
      }
      /* Updated probability of having only speech (s0) or only music (m0),
         before considering the new observation. */
      s0 = tonal->pspeech[0] + tonal->pspeech[1];
      m0 = tonal->pmusic [0] + tonal->pmusic [1];
      /* Updates s0 and m0 with instantaneous probability. */
      tonal->pspeech[0] = s0*(1-tau)*speech0;
      tonal->pmusic [0] = m0*(1-tau)*music0;
      /* Propagate the transition probabilities */
      for (i=1;i<DETECT_SIZE-1;i++)
      {
         tonal->pspeech[i] = tonal->pspeech[i+1]*speech0;
         tonal->pmusic [i] = tonal->pmusic [i+1]*music0;
      }
      /* Probability that the latest frame is speech, when all the previous ones were music. */
      tonal->pspeech[DETECT_SIZE-1] = m0*tau*speech0;
      /* Probability that the latest frame is music, when all the previous ones were speech. */
      tonal->pmusic [DETECT_SIZE-1] = s0*tau*music0;

      /* Renormalise probabilities to 1 */
      for (i=0;i<DETECT_SIZE;i++)
         psum += tonal->pspeech[i] + tonal->pmusic[i];
      psum = 1.f/psum;
      for (i=0;i<DETECT_SIZE;i++)
      {
         tonal->pspeech[i] *= psum;
         tonal->pmusic [i] *= psum;
      }
      psum = tonal->pmusic[0];
      for (i=1;i<DETECT_SIZE;i++)
         psum += tonal->pspeech[i];

      /* Estimate our confidence in the speech/music decisions */
      if (frame_probs[1]>.75)
      {
         if (tonal->music_prob>.9)
         {
            float adapt;
            adapt = 1.f/(++tonal->music_confidence_count);
            tonal->music_confidence_count = IMIN(tonal->music_confidence_count, 500);
            tonal->music_confidence += adapt*MAX16(-.2f,frame_probs[0]-tonal->music_confidence);
         }
         if (tonal->music_prob<.1)
         {
            float adapt;
            adapt = 1.f/(++tonal->speech_confidence_count);
            tonal->speech_confidence_count = IMIN(tonal->speech_confidence_count, 500);
            tonal->speech_confidence += adapt*MIN16(.2f,frame_probs[0]-tonal->speech_confidence);
         }
      }
   }
   tonal->last_music = tonal->music_prob>.5f;
#ifdef MLP_TRAINING
   for (i=0;i<25;i++)
      printf("%f ", features[i]);
   printf("\n");
#endif

   info->bandwidth = bandwidth;
   tonal->prev_bandwidth = bandwidth;
   /*printf("%d %d\n", info->bandwidth, info->opus_bandwidth);*/
   info->noisiness = frame_noisiness;
   info->valid = 1;
   RESTORE_STACK;
}
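/* Standalone sketch (illustrative, not library code) of the delayed-decision
   buffer update performed above: pspeech[i]/pmusic[i] track the hypothesis
   that the stream switched type i frames ago after an "all speech"/"all music"
   past, so the speech/music call for a frame still in the look-ahead buffer
   can be revised by later evidence.  The helper name delayed_decision_step is
   hypothetical; tau, speech0 and music0 correspond to the variables of the
   same names in tonality_analysis(), and detect_size plays the role of
   DETECT_SIZE. */
static void delayed_decision_step(float *pspeech, float *pmusic, int detect_size,
                                  float tau, float speech0, float music0)
{
   int i;
   float s0, m0, psum = 1e-20f;
   /* Mass still consistent with "no recent transition" */
   s0 = pspeech[0] + pspeech[1];
   m0 = pmusic[0] + pmusic[1];
   pspeech[0] = s0*(1-tau)*speech0;
   pmusic[0]  = m0*(1-tau)*music0;
   /* Age the existing transition hypotheses by one frame */
   for (i=1;i<detect_size-1;i++)
   {
      pspeech[i] = pspeech[i+1]*speech0;
      pmusic[i]  = pmusic[i+1]*music0;
   }
   /* New hypotheses: the type switched on the latest frame */
   pspeech[detect_size-1] = m0*tau*speech0;
   pmusic[detect_size-1]  = s0*tau*music0;
   /* Renormalise so all hypotheses sum to 1 */
   for (i=0;i<detect_size;i++)
      psum += pspeech[i] + pmusic[i];
   for (i=0;i<detect_size;i++)
   {
      pspeech[i] /= psum;
      pmusic[i]  /= psum;
   }
}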