/* Glues concealed frames with new good received frames */
void SKP_Silk_PLC_glue_frames(
    SKP_Silk_decoder_state      *psDec,             /* I/O decoder state        */
    SKP_Silk_decoder_control    *psDecCtrl,         /* I/O Decoder control      */
    SKP_int16                   signal[],           /* I/O signal               */
    SKP_int                     length              /* I   length of residual   */
)
{
    SKP_int   i, energy_shift;
    SKP_int32 energy;
    SKP_Silk_PLC_struct *psPLC;
    psPLC = &psDec->sPLC;

    if( psDec->lossCnt ) {
        /* Calculate energy in concealed residual */
        SKP_Silk_sum_sqr_shift( &psPLC->conc_energy, &psPLC->conc_energy_shift, signal, length );

        psPLC->last_frame_lost = 1;
    } else {
        if( psDec->sPLC.last_frame_lost ) {
            /* Calculate residual in decoded signal if last frame was lost */
            SKP_Silk_sum_sqr_shift( &energy, &energy_shift, signal, length );

            /* Normalize energies */
            if( energy_shift > psPLC->conc_energy_shift ) {
                psPLC->conc_energy = SKP_RSHIFT( psPLC->conc_energy, energy_shift - psPLC->conc_energy_shift );
            } else if( energy_shift < psPLC->conc_energy_shift ) {
                energy = SKP_RSHIFT( energy, psPLC->conc_energy_shift - energy_shift );
            }

            /* Fade in the energy difference */
            if( energy > psPLC->conc_energy ) {
                SKP_int32 frac_Q24, LZ;
                SKP_int32 gain_Q12, slope_Q12;

                LZ = SKP_Silk_CLZ32( psPLC->conc_energy );
                LZ = LZ - 1;
                psPLC->conc_energy = SKP_LSHIFT( psPLC->conc_energy, LZ );
                energy = SKP_RSHIFT( energy, SKP_max_32( 24 - LZ, 0 ) );

                frac_Q24 = SKP_DIV32( psPLC->conc_energy, SKP_max( energy, 1 ) );

                gain_Q12  = SKP_Silk_SQRT_APPROX( frac_Q24 );
                slope_Q12 = SKP_DIV32_16( ( 1 << 12 ) - gain_Q12, length );

                for( i = 0; i < length; i++ ) {
                    signal[ i ] = SKP_RSHIFT( SKP_MUL( gain_Q12, signal[ i ] ), 12 );
                    gain_Q12 += slope_Q12;
                    gain_Q12 = SKP_min( gain_Q12, ( 1 << 12 ) );
                }
            }
        }
        psPLC->last_frame_lost = 0;
    }
}
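/* The gluing logic above relies on SKP_Silk_sum_sqr_shift() returning an energy together with a
 * right-shift that keeps the sum of squares within 32 bits. The following is only an illustrative
 * sketch of that assumed contract, not the library implementation. */
#include <stdint.h>

static void sum_sqr_shift_sketch( int32_t *energy, int *shift, const int16_t *x, int len )
{
    int     i, shft = 0;
    int64_t nrg = 0;

    for( i = 0; i < len; i++ ) {
        nrg += (int32_t)x[ i ] * (int32_t)x[ i ];   /* exact 64-bit sum of squares */
    }
    while( nrg > 0x7FFFFFFF ) {                     /* reduce until the energy fits in a signed 32-bit word */
        nrg >>= 1;
        shft++;
    }
    *energy = (int32_t)nrg;                         /* energy ~= ( sum x[i]^2 ) >> shift */
    *shift  = shft;
}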
void SKP_Silk_detect_SWB_input(
    SKP_Silk_detect_SWB_state *psSWBdetect, /* (I/O) encoder state  */
    const int16_t samplesIn[],              /* (I)   input to encoder */
    int nSamplesIn                          /* (I)   length of input  */
)
{
    int HP_8_kHz_len, i;
    int16_t in_HP_8_kHz[MAX_FRAME_LENGTH];
    int32_t energy_32, shift;

    /* High pass filter with cutoff at 8 kHz */
    HP_8_kHz_len = SKP_min_int(nSamplesIn, MAX_FRAME_LENGTH);
    HP_8_kHz_len = SKP_max_int(HP_8_kHz_len, 0);

    /* Cutoff around 9 kHz */
    /* A = conv(conv([8192,14613, 6868], [8192,12883, 7337]), [8192,11586, 7911]); */
    /* B = conv(conv([575, -948, 575], [575, -221, 575]), [575, 104, 575]); */
    SKP_Silk_biquad(samplesIn, SKP_Silk_SWB_detect_B_HP_Q13[0], SKP_Silk_SWB_detect_A_HP_Q13[0],
                    psSWBdetect->S_HP_8_kHz[0], in_HP_8_kHz, HP_8_kHz_len);
    for (i = 1; i < NB_SOS; i++) {
        SKP_Silk_biquad(in_HP_8_kHz, SKP_Silk_SWB_detect_B_HP_Q13[i], SKP_Silk_SWB_detect_A_HP_Q13[i],
                        psSWBdetect->S_HP_8_kHz[i], in_HP_8_kHz, HP_8_kHz_len);
    }

    /* Calculate energy in HP signal */
    SKP_Silk_sum_sqr_shift(&energy_32, &shift, in_HP_8_kHz, HP_8_kHz_len);

    /* Count consecutive samples above threshold, after adjusting threshold for number of input samples and shift */
    if (energy_32 > SKP_RSHIFT(SKP_SMULBB(HP_8_KHZ_THRES, HP_8_kHz_len), shift)) {
        psSWBdetect->ConsecSmplsAboveThres += nSamplesIn;
        if (psSWBdetect->ConsecSmplsAboveThres > CONCEC_SWB_SMPLS_THRES) {
            psSWBdetect->SWB_detected = 1;
        }
    } else {
        psSWBdetect->ConsecSmplsAboveThres -= nSamplesIn;
        psSWBdetect->ConsecSmplsAboveThres = SKP_max(psSWBdetect->ConsecSmplsAboveThres, 0);
    }

    /* If sufficient speech activity and no SWB detected, we detect the signal as being WB */
    if ((psSWBdetect->ActiveSpeech_ms > WB_DETECT_ACTIVE_SPEECH_MS_THRES) && (psSWBdetect->SWB_detected == 0)) {
        psSWBdetect->WB_detected = 1;
    }
}
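/* The cascade above runs NB_SOS second-order sections whose B/A coefficients are in Q13 with a
 * monic a0 of 8192 (= 1.0). The sketch below shows one such section in transposed direct form II
 * under those assumptions; it is illustrative only and may differ from the actual SKP_Silk_biquad(). */
#include <stdint.h>

static void biquad_Q13_sketch( const int16_t *in, const int16_t B_Q13[ 3 ], const int16_t A_Q13[ 3 ],
                               int32_t S[ 2 ], int16_t *out, int len )
{
    int     k;
    int32_t in32, out32;

    for( k = 0; k < len; k++ ) {
        in32  = (int32_t)in[ k ];
        out32 = ( B_Q13[ 0 ] * in32 + S[ 0 ] ) >> 13;              /* y = ( b0*x + s0 ) >> 13, a0 assumed = 1.0 */
        S[ 0 ] = B_Q13[ 1 ] * in32 - A_Q13[ 1 ] * out32 + S[ 1 ];  /* s0 = b1*x - a1*y + s1 (Q13-scaled)        */
        S[ 1 ] = B_Q13[ 2 ] * in32 - A_Q13[ 2 ] * out32;           /* s1 = b2*x - a2*y      (Q13-scaled)        */
        if( out32 >  32767 ) out32 =  32767;                       /* saturate to int16                          */
        if( out32 < -32768 ) out32 = -32768;
        out[ k ] = (int16_t)out32;
    }
}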
/* Calculates correlation matrix X'*X */
void SKP_Silk_corrMatrix_FIX(
    const SKP_int16     *x,             /* I    x vector [L + order - 1] used to form data matrix X    */
    const SKP_int       L,              /* I    Length of vectors                                       */
    const SKP_int       order,          /* I    Max lag for correlation                                 */
    const SKP_int       head_room,      /* I    Desired headroom                                        */
    SKP_int32           *XX,            /* O    Pointer to X'*X correlation matrix [ order x order ]    */
    SKP_int             *rshifts        /* I/O  Right shifts of correlations                            */
)
{
    SKP_int         i, j, lag, rshifts_local, head_room_rshifts;
    SKP_int32       energy;
    const SKP_int16 *ptr1, *ptr2;

    /* Calculate energy to find shift used to fit in 32 bits */
    SKP_Silk_sum_sqr_shift( &energy, &rshifts_local, x, L + order - 1 );
    /* Add shifts to get the desired head room */
    head_room_rshifts = SKP_max( head_room - SKP_Silk_CLZ32( energy ), 0 );

    energy = SKP_RSHIFT32( energy, head_room_rshifts );
    rshifts_local += head_room_rshifts;

    /* Calculate energy of first column (0) of X: X[:,0]'*X[:,0] */
    /* Remove contribution of first order - 1 samples */
    for( i = 0; i < order - 1; i++ ) {
        energy -= SKP_RSHIFT32( SKP_SMULBB( x[ i ], x[ i ] ), rshifts_local );
    }
    if( rshifts_local < *rshifts ) {
        /* Adjust energy */
        energy = SKP_RSHIFT32( energy, *rshifts - rshifts_local );
        rshifts_local = *rshifts;
    }

    /* Calculate energy of remaining columns of X: X[:,j]'*X[:,j] */
    /* Fill out the diagonal of the correlation matrix */
    matrix_ptr( XX, 0, 0, order ) = energy;
    ptr1 = &x[ order - 1 ]; /* First sample of column 0 of X */
    for( j = 1; j < order; j++ ) {
        energy = SKP_SUB32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ L - j ], ptr1[ L - j ] ), rshifts_local ) );
        energy = SKP_ADD32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ -j ], ptr1[ -j ] ), rshifts_local ) );
        matrix_ptr( XX, j, j, order ) = energy;
    }

    ptr2 = &x[ order - 2 ]; /* First sample of column 1 of X */
    /* Calculate the remaining elements of the correlation matrix */
    if( rshifts_local > 0 ) {
        /* Right shifting used */
        for( lag = 1; lag < order; lag++ ) {
            /* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */
            energy = 0;
            for( i = 0; i < L; i++ ) {
                energy += SKP_RSHIFT32( SKP_SMULBB( ptr1[ i ], ptr2[ i ] ), rshifts_local );
            }
            /* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
            matrix_ptr( XX, lag, 0, order ) = energy;
            matrix_ptr( XX, 0, lag, order ) = energy;
            for( j = 1; j < ( order - lag ); j++ ) {
                energy = SKP_SUB32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ), rshifts_local ) );
                energy = SKP_ADD32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ -j ], ptr2[ -j ] ), rshifts_local ) );
                matrix_ptr( XX, lag + j, j, order ) = energy;
                matrix_ptr( XX, j, lag + j, order ) = energy;
            }
            ptr2--; /* Update pointer to first sample of next column (lag) in X */
        }
    } else {
        for( lag = 1; lag < order; lag++ ) {
            /* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */
            energy = SKP_Silk_inner_prod_aligned( ptr1, ptr2, L );
            matrix_ptr( XX, lag, 0, order ) = energy;
            matrix_ptr( XX, 0, lag, order ) = energy;
            /* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
            for( j = 1; j < ( order - lag ); j++ ) {
                energy = SKP_SUB32( energy, SKP_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ) );
                energy = SKP_SMLABB( energy, ptr1[ -j ], ptr2[ -j ] );
                matrix_ptr( XX, lag + j, j, order ) = energy;
                matrix_ptr( XX, j, lag + j, order ) = energy;
            }
            ptr2--; /* Update pointer to first sample of next column (lag) in X */
        }
    }
    *rshifts = rshifts_local;
}
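/* Floating-point reference of what the routine above computes, useful for checking the fixed-point
 * result up to the applied right-shifts. It assumes the same column convention (column c of the data
 * matrix X starts at x[ order - 1 - c ]) and row-major storage matching matrix_ptr(); sketch only. */
#include <stdint.h>

static void corr_matrix_ref( const int16_t *x, int L, int order, double *XX )
{
    int i, j, n;

    for( i = 0; i < order; i++ ) {
        for( j = 0; j < order; j++ ) {
            double acc = 0.0;
            for( n = 0; n < L; n++ ) {
                acc += (double)x[ order - 1 - i + n ] * (double)x[ order - 1 - j + n ];
            }
            XX[ i * order + j ] = acc;  /* row-major, i.e. matrix_ptr( XX, i, j, order ) */
        }
    }
}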
void SKP_Silk_PLC_conceal(
    SKP_Silk_decoder_state      *psDec,             /* I/O Decoder state        */
    SKP_Silk_decoder_control    *psDecCtrl,         /* I/O Decoder control      */
    SKP_int16                   signal[],           /* O   concealed signal     */
    SKP_int                     length              /* I   length of residual   */
)
{
    SKP_int   i, j, k;
    SKP_int16 *B_Q14, exc_buf[ MAX_FRAME_LENGTH ], *exc_buf_ptr;
    SKP_int16 rand_scale_Q14, A_Q12_tmp[ MAX_LPC_ORDER ];
    SKP_int32 rand_seed, harm_Gain_Q15, rand_Gain_Q15;
    SKP_int   lag, idx, shift1, shift2;
    SKP_int32 energy1, energy2, *rand_ptr, *pred_lag_ptr, Atmp;
    SKP_int32 sig_Q10[ MAX_FRAME_LENGTH ], *sig_Q10_ptr, LPC_exc_Q10, LPC_pred_Q10, LTP_pred_Q14;
    SKP_Silk_PLC_struct *psPLC;
    psPLC = &psDec->sPLC;

    /* Update LTP buffer */
    SKP_memcpy( psDec->sLTP_Q16, &psDec->sLTP_Q16[ psDec->frame_length ], psDec->frame_length * sizeof( SKP_int32 ) );

    /* LPC concealment. Apply BWE to previous LPC */
    SKP_Silk_bwexpander( psPLC->prevLPC_Q12, psDec->LPC_order, BWE_COEF_Q16 );

    /* Find random noise component */
    /* Scale previous excitation signal */
    exc_buf_ptr = exc_buf;
    for( k = ( NB_SUBFR >> 1 ); k < NB_SUBFR; k++ ) {
        for( i = 0; i < psDec->subfr_length; i++ ) {
            exc_buf_ptr[ i ] = ( SKP_int16 )SKP_RSHIFT(
                SKP_SMULWW( psDec->exc_Q10[ i + k * psDec->subfr_length ], psPLC->prevGain_Q16[ k ] ), 10 );
        }
        exc_buf_ptr += psDec->subfr_length;
    }
    /* Find the subframe with lowest energy of the last two and use that as random noise generator */
    SKP_Silk_sum_sqr_shift( &energy1, &shift1, exc_buf, psDec->subfr_length );
    SKP_Silk_sum_sqr_shift( &energy2, &shift2, &exc_buf[ psDec->subfr_length ], psDec->subfr_length );

    if( SKP_RSHIFT( energy1, shift2 ) < SKP_RSHIFT( energy2, shift1 ) ) {
        /* First sub-frame has lowest energy */
        rand_ptr = &psDec->exc_Q10[ SKP_max_int( 0, 3 * psDec->subfr_length - RAND_BUF_SIZE ) ];
    } else {
        /* Second sub-frame has lowest energy */
        rand_ptr = &psDec->exc_Q10[ SKP_max_int( 0, psDec->frame_length - RAND_BUF_SIZE ) ];
    }

    /* Setup Gain to random noise component */
    B_Q14          = psPLC->LTPCoef_Q14;
    rand_scale_Q14 = psPLC->randScale_Q14;

    /* Setup attenuation gains */
    harm_Gain_Q15 = HARM_ATT_Q15[ SKP_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    if( psDec->prev_sigtype == SIG_TYPE_VOICED ) {
        rand_Gain_Q15 = PLC_RAND_ATTENUATE_V_Q15[ SKP_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    } else {
        rand_Gain_Q15 = PLC_RAND_ATTENUATE_UV_Q15[ SKP_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    }

    /* First Lost frame */
    if( psDec->lossCnt == 0 ) {
        rand_scale_Q14 = ( 1 << 14 );

        /* Reduce random noise Gain for voiced frames */
        if( psDec->prev_sigtype == SIG_TYPE_VOICED ) {
            for( i = 0; i < LTP_ORDER; i++ ) {
                rand_scale_Q14 -= B_Q14[ i ];
            }
            rand_scale_Q14 = SKP_max_16( 3277, rand_scale_Q14 ); /* 0.2 */
            rand_scale_Q14 = ( SKP_int16 )SKP_RSHIFT( SKP_SMULBB( rand_scale_Q14, psPLC->prevLTP_scale_Q14 ), 14 );
        }

        /* Reduce random noise for unvoiced frames with high LPC gain */
        if( psDec->prev_sigtype == SIG_TYPE_UNVOICED ) {
            SKP_int32 invGain_Q30, down_scale_Q30;

            SKP_Silk_LPC_inverse_pred_gain( &invGain_Q30, psPLC->prevLPC_Q12, psDec->LPC_order );

            down_scale_Q30 = SKP_min_32( SKP_RSHIFT( ( 1 << 30 ), LOG2_INV_LPC_GAIN_HIGH_THRES ), invGain_Q30 );
            down_scale_Q30 = SKP_max_32( SKP_RSHIFT( ( 1 << 30 ), LOG2_INV_LPC_GAIN_LOW_THRES ), down_scale_Q30 );
            down_scale_Q30 = SKP_LSHIFT( down_scale_Q30, LOG2_INV_LPC_GAIN_HIGH_THRES );

            rand_Gain_Q15 = SKP_RSHIFT( SKP_SMULWB( down_scale_Q30, rand_Gain_Q15 ), 14 );
        }
    }
    rand_seed = psPLC->rand_seed;
    lag       = SKP_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
    psDec->sLTP_buf_idx = psDec->frame_length;

    /***************************/
    /* LTP synthesis filtering */
    /***************************/
    sig_Q10_ptr = sig_Q10;
    for( k = 0; k < NB_SUBFR; k++ ) {
        /* Setup pointer */
        pred_lag_ptr = &psDec->sLTP_Q16[ psDec->sLTP_buf_idx - lag + LTP_ORDER / 2 ];
        for( i = 0; i < psDec->subfr_length; i++ ) {
            rand_seed = SKP_RAND( rand_seed );
            idx = SKP_RSHIFT( rand_seed, 25 ) & RAND_BUF_MASK;

            /* Unrolled loop */
            LTP_pred_Q14 = SKP_SMULWB(               pred_lag_ptr[  0 ], B_Q14[ 0 ] );
            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], B_Q14[ 1 ] );
            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], B_Q14[ 2 ] );
            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], B_Q14[ 3 ] );
            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], B_Q14[ 4 ] );
            pred_lag_ptr++;

            /* Generate LPC residual */
            LPC_exc_Q10 = SKP_LSHIFT( SKP_SMULWB( rand_ptr[ idx ], rand_scale_Q14 ), 2 ); /* Random noise part */
            LPC_exc_Q10 = SKP_ADD32( LPC_exc_Q10, SKP_RSHIFT_ROUND( LTP_pred_Q14, 4 ) );  /* Harmonic part     */

            /* Update states */
            psDec->sLTP_Q16[ psDec->sLTP_buf_idx ] = SKP_LSHIFT( LPC_exc_Q10, 6 );
            psDec->sLTP_buf_idx++;

            /* Save LPC residual */
            sig_Q10_ptr[ i ] = LPC_exc_Q10;
        }
        sig_Q10_ptr += psDec->subfr_length;
        /* Gradually reduce LTP gain */
        for( j = 0; j < LTP_ORDER; j++ ) {
            B_Q14[ j ] = SKP_RSHIFT( SKP_SMULBB( harm_Gain_Q15, B_Q14[ j ] ), 15 );
        }
        /* Gradually reduce excitation gain */
        rand_scale_Q14 = SKP_RSHIFT( SKP_SMULBB( rand_scale_Q14, rand_Gain_Q15 ), 15 );

        /* Slowly increase pitch lag */
        psPLC->pitchL_Q8 += SKP_SMULWB( psPLC->pitchL_Q8, PITCH_DRIFT_FAC_Q16 );
        psPLC->pitchL_Q8 = SKP_min_32( psPLC->pitchL_Q8, SKP_LSHIFT( SKP_SMULBB( MAX_PITCH_LAG_MS, psDec->fs_kHz ), 8 ) );
        lag = SKP_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
    }

    /***************************/
    /* LPC synthesis filtering */
    /***************************/
    sig_Q10_ptr = sig_Q10;
    /* Preload LPC coefficients to array on stack. Gives small performance gain */
    SKP_memcpy( A_Q12_tmp, psPLC->prevLPC_Q12, psDec->LPC_order * sizeof( SKP_int16 ) );
    SKP_assert( psDec->LPC_order >= 10 ); /* check that unrolling works */
    for( k = 0; k < NB_SUBFR; k++ ) {
        for( i = 0; i < psDec->subfr_length; i++ ) {
            /* unrolled */
            Atmp = *( ( SKP_int32* )&A_Q12_tmp[ 0 ] ); /* read two coefficients at once */
            LPC_pred_Q10 = SKP_SMULWB(               psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  1 ], Atmp );
            LPC_pred_Q10 = SKP_SMLAWT( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  2 ], Atmp );
            Atmp = *( ( SKP_int32* )&A_Q12_tmp[ 2 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  3 ], Atmp );
            LPC_pred_Q10 = SKP_SMLAWT( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  4 ], Atmp );
            Atmp = *( ( SKP_int32* )&A_Q12_tmp[ 4 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  5 ], Atmp );
            LPC_pred_Q10 = SKP_SMLAWT( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  6 ], Atmp );
            Atmp = *( ( SKP_int32* )&A_Q12_tmp[ 6 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  7 ], Atmp );
            LPC_pred_Q10 = SKP_SMLAWT( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  8 ], Atmp );
            Atmp = *( ( SKP_int32* )&A_Q12_tmp[ 8 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  9 ], Atmp );
            LPC_pred_Q10 = SKP_SMLAWT( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - 10 ], Atmp );

            for( j = 10; j < psDec->LPC_order; j += 2 ) {
                Atmp = *( ( SKP_int32* )&A_Q12_tmp[ j ] );
                LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - 1 - j ], Atmp );
                LPC_pred_Q10 = SKP_SMLAWT( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - 2 - j ], Atmp );
            }

            /* Add prediction to LPC residual */
            sig_Q10_ptr[ i ] = SKP_ADD32( sig_Q10_ptr[ i ], LPC_pred_Q10 );

            /* Update states */
            psDec->sLPC_Q14[ MAX_LPC_ORDER + i ] = SKP_LSHIFT( sig_Q10_ptr[ i ], 4 );
        }
        sig_Q10_ptr += psDec->subfr_length;
        /* Update LPC filter state */
        SKP_memcpy( psDec->sLPC_Q14, &psDec->sLPC_Q14[ psDec->subfr_length ], MAX_LPC_ORDER * sizeof( SKP_int32 ) );
    }

    /* Scale with Gain */
    for( i = 0; i < psDec->frame_length; i++ ) {
        signal[ i ] = ( SKP_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( SKP_SMULWW( sig_Q10[ i ], psPLC->prevGain_Q16[ NB_SUBFR - 1 ] ), 10 ) );
    }

    /**************************************/
    /* Update states                      */
    /**************************************/
    psPLC->rand_seed     = rand_seed;
    psPLC->randScale_Q14 = rand_scale_Q14;
    for( i = 0; i < NB_SUBFR; i++ ) {
        psDecCtrl->pitchL[ i ] = lag;
    }
}
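/* The unrolled LPC loop above packs two adjacent Q12 coefficients into one 32-bit word and multiplies
 * the 32-bit filter state with the low or high halfword via SMLAWB / SMLAWT. A portable sketch of the
 * assumed multiply semantics (ARM-style, little-endian coefficient packing; rounding details may differ): */
#include <stdint.h>

/* ( a32 * low 16 bits of b32 ) >> 16, as assumed for SKP_SMULWB */
static inline int32_t smulwb_sketch( int32_t a32, int32_t b32 )
{
    return (int32_t)( ( (int64_t)a32 * (int16_t)( b32 & 0xFFFF ) ) >> 16 );
}

/* ( a32 * high 16 bits of b32 ) >> 16, as assumed for SKP_SMULWT */
static inline int32_t smulwt_sketch( int32_t a32, int32_t b32 )
{
    return (int32_t)( ( (int64_t)a32 * (int16_t)( b32 >> 16 ) ) >> 16 );
}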
void SKP_Silk_noise_shape_analysis_FIX(
    SKP_Silk_encoder_state_FIX      *psEnc,         /* I/O  Encoder state FIX                           */
    SKP_Silk_encoder_control_FIX    *psEncCtrl,     /* I/O  Encoder control FIX                         */
    const SKP_int16                 *pitch_res,     /* I    LPC residual from pitch analysis            */
    const SKP_int16                 *x              /* I    Input signal [ frame_length + la_shape ]    */
)
{
    SKP_Silk_shape_state_FIX *psShapeSt = &psEnc->sShape;
    SKP_int     k, i, nSamples, Qnrg, b_Q14, warping_Q16, scale = 0;
    SKP_int32   SNR_adj_dB_Q7, HarmBoost_Q16, HarmShapeGain_Q16, Tilt_Q16, tmp32;
    SKP_int32   nrg, pre_nrg_Q30, log_energy_Q7, log_energy_prev_Q7, energy_variation_Q7;
    SKP_int32   delta_Q16, BWExp1_Q16, BWExp2_Q16, gain_mult_Q16, gain_add_Q16, strength_Q16, b_Q8;
    SKP_int32   auto_corr[ MAX_SHAPE_LPC_ORDER + 1 ];
    SKP_int32   refl_coef_Q16[ MAX_SHAPE_LPC_ORDER ];
    SKP_int32   AR1_Q24[ MAX_SHAPE_LPC_ORDER ];
    SKP_int32   AR2_Q24[ MAX_SHAPE_LPC_ORDER ];
    SKP_int16   x_windowed[ SHAPE_LPC_WIN_MAX ];
    const SKP_int16 *x_ptr, *pitch_res_ptr;
    SKP_int32   sqrt_nrg[ NB_SUBFR ], Qnrg_vec[ NB_SUBFR ];

    /* Point to start of first LPC analysis block */
    x_ptr = x - psEnc->sCmn.la_shape;

    /****************/
    /* CONTROL SNR  */
    /****************/
    /* Reduce SNR_dB values if recent bitstream has exceeded TargetRate */
    psEncCtrl->current_SNR_dB_Q7 = psEnc->SNR_dB_Q7 - SKP_SMULWB( SKP_LSHIFT( ( SKP_int32 )psEnc->BufferedInChannel_ms, 7 ),
        SKP_FIX_CONST( 0.05, 16 ) );

    /* Reduce SNR_dB if inband FEC used */
    if( psEnc->speech_activity_Q8 > SKP_FIX_CONST( LBRR_SPEECH_ACTIVITY_THRES, 8 ) ) {
        psEncCtrl->current_SNR_dB_Q7 -= SKP_RSHIFT( psEnc->inBandFEC_SNR_comp_Q8, 1 );
    }

    /****************/
    /* GAIN CONTROL */
    /****************/
    /* Input quality is the average of the quality in the lowest two VAD bands */
    psEncCtrl->input_quality_Q14 = ( SKP_int )SKP_RSHIFT( ( SKP_int32 )psEncCtrl->input_quality_bands_Q15[ 0 ]
        + psEncCtrl->input_quality_bands_Q15[ 1 ], 2 );

    /* Coding quality level, between 0.0_Q0 and 1.0_Q0, but in Q14 */
    psEncCtrl->coding_quality_Q14 = SKP_RSHIFT( SKP_Silk_sigm_Q15( SKP_RSHIFT_ROUND( psEncCtrl->current_SNR_dB_Q7 -
        SKP_FIX_CONST( 18.0, 7 ), 4 ) ), 1 );

    /* Reduce coding SNR during low speech activity */
    b_Q8 = SKP_FIX_CONST( 1.0, 8 ) - psEnc->speech_activity_Q8;
    b_Q8 = SKP_SMULWB( SKP_LSHIFT( b_Q8, 8 ), b_Q8 );
    SNR_adj_dB_Q7 = SKP_SMLAWB( psEncCtrl->current_SNR_dB_Q7,
        SKP_SMULBB( SKP_FIX_CONST( -BG_SNR_DECR_dB, 7 ) >> ( 4 + 1 ), b_Q8 ),                                       // Q11
        SKP_SMULWB( SKP_FIX_CONST( 1.0, 14 ) + psEncCtrl->input_quality_Q14, psEncCtrl->coding_quality_Q14 ) );     // Q12

    if( psEncCtrl->sCmn.sigtype == SIG_TYPE_VOICED ) {
        /* Reduce gains for periodic signals */
        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7, SKP_FIX_CONST( HARM_SNR_INCR_dB, 8 ), psEnc->LTPCorr_Q15 );
    } else {
        /* For unvoiced signals and low-quality input, adjust the quality slower than SNR_dB setting */
        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7,
            SKP_SMLAWB( SKP_FIX_CONST( 6.0, 9 ), -SKP_FIX_CONST( 0.4, 18 ), psEncCtrl->current_SNR_dB_Q7 ),
            SKP_FIX_CONST( 1.0, 14 ) - psEncCtrl->input_quality_Q14 );
    }

    /*************************/
    /* SPARSENESS PROCESSING */
    /*************************/
    /* Set quantizer offset */
    if( psEncCtrl->sCmn.sigtype == SIG_TYPE_VOICED ) {
        /* Initially set to 0; may be overruled in process_gains(..) */
        psEncCtrl->sCmn.QuantOffsetType = 0;
        psEncCtrl->sparseness_Q8 = 0;
    } else {
        /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */
        nSamples = SKP_LSHIFT( psEnc->sCmn.fs_kHz, 1 );
        energy_variation_Q7 = 0;
        log_energy_prev_Q7  = 0;
        pitch_res_ptr = pitch_res;
        for( k = 0; k < FRAME_LENGTH_MS / 2; k++ ) {
            SKP_Silk_sum_sqr_shift( &nrg, &scale, pitch_res_ptr, nSamples );
            nrg += SKP_RSHIFT( nSamples, scale );           // Q(-scale)

            log_energy_Q7 = SKP_Silk_lin2log( nrg );
            if( k > 0 ) {
                energy_variation_Q7 += SKP_abs( log_energy_Q7 - log_energy_prev_Q7 );
            }
            log_energy_prev_Q7 = log_energy_Q7;
            pitch_res_ptr += nSamples;
        }

        psEncCtrl->sparseness_Q8 = SKP_RSHIFT( SKP_Silk_sigm_Q15( SKP_SMULWB( energy_variation_Q7 -
            SKP_FIX_CONST( 5.0, 7 ), SKP_FIX_CONST( 0.1, 16 ) ) ), 7 );

        /* Set quantization offset depending on sparseness measure */
        if( psEncCtrl->sparseness_Q8 > SKP_FIX_CONST( SPARSENESS_THRESHOLD_QNT_OFFSET, 8 ) ) {
            psEncCtrl->sCmn.QuantOffsetType = 0;
        } else {
            psEncCtrl->sCmn.QuantOffsetType = 1;
        }

        /* Increase coding SNR for sparse signals */
        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7, SKP_FIX_CONST( SPARSE_SNR_INCR_dB, 15 ), psEncCtrl->sparseness_Q8 - SKP_FIX_CONST( 0.5, 8 ) );
    }

    /*******************************/
    /* Control bandwidth expansion */
    /*******************************/
    /* More BWE for signals with high prediction gain */
    strength_Q16 = SKP_SMULWB( psEncCtrl->predGain_Q16, SKP_FIX_CONST( FIND_PITCH_WHITE_NOISE_FRACTION, 16 ) );
    BWExp1_Q16 = BWExp2_Q16 = SKP_DIV32_varQ( SKP_FIX_CONST( BANDWIDTH_EXPANSION, 16 ),
        SKP_SMLAWW( SKP_FIX_CONST( 1.0, 16 ), strength_Q16, strength_Q16 ), 16 );
    delta_Q16  = SKP_SMULWB( SKP_FIX_CONST( 1.0, 16 ) - SKP_SMULBB( 3, psEncCtrl->coding_quality_Q14 ),
        SKP_FIX_CONST( LOW_RATE_BANDWIDTH_EXPANSION_DELTA, 16 ) );
    BWExp1_Q16 = SKP_SUB32( BWExp1_Q16, delta_Q16 );
    BWExp2_Q16 = SKP_ADD32( BWExp2_Q16, delta_Q16 );

    /* BWExp1 will be applied after BWExp2, so make it relative */
    BWExp1_Q16 = SKP_DIV32_16( SKP_LSHIFT( BWExp1_Q16, 14 ), SKP_RSHIFT( BWExp2_Q16, 2 ) );

    if( psEnc->sCmn.warping_Q16 > 0 ) {
        /* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */
        warping_Q16 = SKP_SMLAWB( psEnc->sCmn.warping_Q16, psEncCtrl->coding_quality_Q14, SKP_FIX_CONST( 0.01, 18 ) );
    } else {
        warping_Q16 = 0;
    }

    /********************************************/
    /* Compute noise shaping AR coefs and gains */
    /********************************************/
    for( k = 0; k < NB_SUBFR; k++ ) {
        /* Apply window: sine slope followed by flat part followed by cosine slope */
        SKP_int shift, slope_part, flat_part;
        flat_part  = psEnc->sCmn.fs_kHz * 5;
        slope_part = SKP_RSHIFT( psEnc->sCmn.shapeWinLength - flat_part, 1 );

        SKP_Silk_apply_sine_window_new( x_windowed, x_ptr, 1, slope_part );
        shift = slope_part;
        SKP_memcpy( x_windowed + shift, x_ptr + shift, flat_part * sizeof( SKP_int16 ) );
        shift += flat_part;
        SKP_Silk_apply_sine_window_new( x_windowed + shift, x_ptr + shift, 2, slope_part );

        /* Update pointer: next LPC analysis block */
        x_ptr += psEnc->sCmn.subfr_length;

        if( psEnc->sCmn.warping_Q16 > 0 ) {
            /* Calculate warped auto correlation */
            SKP_Silk_warped_autocorrelation_FIX( auto_corr, &scale, x_windowed, warping_Q16, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder );
        } else {
            /* Calculate regular auto correlation */
            SKP_Silk_autocorr( auto_corr, &scale, x_windowed, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder + 1 );
        }

        /* Add white noise, as a fraction of energy */
        auto_corr[ 0 ] = SKP_ADD32( auto_corr[ 0 ], SKP_max_32( SKP_SMULWB( SKP_RSHIFT( auto_corr[ 0 ], 4 ),
            SKP_FIX_CONST( SHAPE_WHITE_NOISE_FRACTION, 20 ) ), 1 ) );

        /* Calculate the reflection coefficients using schur */
        nrg = SKP_Silk_schur64( refl_coef_Q16, auto_corr, psEnc->sCmn.shapingLPCOrder );
        SKP_assert( nrg >= 0 );

        /* Convert reflection coefficients to prediction coefficients */
        SKP_Silk_k2a_Q16( AR2_Q24, refl_coef_Q16, psEnc->sCmn.shapingLPCOrder );

        Qnrg = -scale;          // range: -12...30
        SKP_assert( Qnrg >= -12 );
        SKP_assert( Qnrg <= 30 );

        /* Make sure that Qnrg is an even number */
        if( Qnrg & 1 ) {
            Qnrg -= 1;
            nrg >>= 1;
        }

        tmp32 = SKP_Silk_SQRT_APPROX( nrg );
        Qnrg >>= 1;             // range: -6...15

        sqrt_nrg[ k ] = tmp32;
        Qnrg_vec[ k ] = Qnrg;

        psEncCtrl->Gains_Q16[ k ] = SKP_LSHIFT_SAT32( tmp32, 16 - Qnrg );

        if( psEnc->sCmn.warping_Q16 > 0 ) {
            /* Adjust gain for warping */
            gain_mult_Q16 = warped_gain( AR2_Q24, warping_Q16, psEnc->sCmn.shapingLPCOrder );
            SKP_assert( psEncCtrl->Gains_Q16[ k ] >= 0 );
            psEncCtrl->Gains_Q16[ k ] = SKP_SMULWW( psEncCtrl->Gains_Q16[ k ], gain_mult_Q16 );
            if( psEncCtrl->Gains_Q16[ k ] < 0 ) {
                psEncCtrl->Gains_Q16[ k ] = SKP_int32_MAX;
            }
        }

        /* Bandwidth expansion for synthesis filter shaping */
        SKP_Silk_bwexpander_32( AR2_Q24, psEnc->sCmn.shapingLPCOrder, BWExp2_Q16 );

        /* Compute noise shaping filter coefficients */
        SKP_memcpy( AR1_Q24, AR2_Q24, psEnc->sCmn.shapingLPCOrder * sizeof( SKP_int32 ) );

        /* Bandwidth expansion for analysis filter shaping */
        SKP_assert( BWExp1_Q16 <= SKP_FIX_CONST( 1.0, 16 ) );
        SKP_Silk_bwexpander_32( AR1_Q24, psEnc->sCmn.shapingLPCOrder, BWExp1_Q16 );

        /* Ratio of prediction gains, in energy domain */
        SKP_Silk_LPC_inverse_pred_gain_Q24( &pre_nrg_Q30, AR2_Q24, psEnc->sCmn.shapingLPCOrder );
        SKP_Silk_LPC_inverse_pred_gain_Q24( &nrg,         AR1_Q24, psEnc->sCmn.shapingLPCOrder );

        //psEncCtrl->GainsPre[ k ] = 1.0f - 0.7f * ( 1.0f - pre_nrg / nrg ) = 0.3f + 0.7f * pre_nrg / nrg;
        pre_nrg_Q30 = SKP_LSHIFT32( SKP_SMULWB( pre_nrg_Q30, SKP_FIX_CONST( 0.7, 15 ) ), 1 );
        psEncCtrl->GainsPre_Q14[ k ] = ( SKP_int )SKP_FIX_CONST( 0.3, 14 ) + SKP_DIV32_varQ( pre_nrg_Q30, nrg, 14 );

        /* Convert to monic warped prediction coefficients and limit absolute values */
        limit_warped_coefs( AR2_Q24, AR1_Q24, warping_Q16, SKP_FIX_CONST( 3.999, 24 ), psEnc->sCmn.shapingLPCOrder );

        /* Convert from Q24 to Q13 and store in int16 */
        for( i = 0; i < psEnc->sCmn.shapingLPCOrder; i++ ) {
            psEncCtrl->AR1_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = ( SKP_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( AR1_Q24[ i ], 11 ) );
            psEncCtrl->AR2_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = ( SKP_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( AR2_Q24[ i ], 11 ) );
        }
    }
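/* The per-subframe gain above is mostly Q-format bookkeeping: nrg is an energy in Q(-scale) = Q(Qnrg),
 * its square root therefore lives in Q(Qnrg/2) (which is why Qnrg is forced even), and the final left
 * shift converts that to Q16. The sketch below walks the same steps, with sqrt() standing in for
 * SKP_Silk_SQRT_APPROX() and a clamp for SKP_LSHIFT_SAT32(); illustrative only. */
#include <math.h>
#include <stdint.h>

static int32_t subfr_gain_Q16_sketch( int32_t nrg, int Qnrg )
{
    int64_t gain;

    if( Qnrg & 1 ) {                                /* make the Q-domain even */
        Qnrg -= 1;
        nrg >>= 1;
    }
    gain = (int64_t)sqrt( (double)nrg );            /* Q( Qnrg / 2 ) */
    Qnrg >>= 1;

    gain <<= ( 16 - Qnrg );                         /* convert to Q16 */
    return gain > INT32_MAX ? INT32_MAX : (int32_t)gain;
}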
/* Compute reflection coefficients from input signal */
void SKP_Silk_burg_modified(
    SKP_int32       *res_nrg,           /* O    residual energy                                                 */
    SKP_int         *res_nrg_Q,         /* O    residual energy Q value                                         */
    SKP_int32       A_Q16[],            /* O    prediction coefficients (length order)                          */
    const SKP_int16 x[],                /* I    input signal, length: nb_subfr * ( D + subfr_length )           */
    const SKP_int   subfr_length,       /* I    input signal subframe length (including D preceding samples)    */
    const SKP_int   nb_subfr,           /* I    number of subframes stacked in x                                */
    const SKP_int32 WhiteNoiseFrac_Q32, /* I    fraction added to zero-lag autocorrelation                      */
    const SKP_int   D                   /* I    order                                                           */
)
{
    SKP_int         k, n, s, lz, rshifts, rshifts_extra;
    SKP_int32       C0, num, nrg, rc_Q31, Atmp_QA, Atmp1, tmp1, tmp2, x1, x2;
    const SKP_int16 *x_ptr;
    SKP_int32       C_first_row[ SKP_Silk_MAX_ORDER_LPC ];
    SKP_int32       C_last_row[ SKP_Silk_MAX_ORDER_LPC ];
    SKP_int32       Af_QA[ SKP_Silk_MAX_ORDER_LPC ];
    SKP_int32       CAf[ SKP_Silk_MAX_ORDER_LPC + 1 ];
    SKP_int32       CAb[ SKP_Silk_MAX_ORDER_LPC + 1 ];

    SKP_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE );
    SKP_assert( nb_subfr <= MAX_NB_SUBFR );

    /* Compute autocorrelations, added over subframes */
    SKP_Silk_sum_sqr_shift( &C0, &rshifts, x, nb_subfr * subfr_length );
    if( rshifts > MAX_RSHIFTS ) {
        C0 = SKP_LSHIFT32( C0, rshifts - MAX_RSHIFTS );
        SKP_assert( C0 > 0 );
        rshifts = MAX_RSHIFTS;
    } else {
        lz = SKP_Silk_CLZ32( C0 ) - 1;
        rshifts_extra = N_BITS_HEAD_ROOM - lz;
        if( rshifts_extra > 0 ) {
            rshifts_extra = SKP_min( rshifts_extra, MAX_RSHIFTS - rshifts );
            C0 = SKP_RSHIFT32( C0, rshifts_extra );
        } else {
            rshifts_extra = SKP_max( rshifts_extra, MIN_RSHIFTS - rshifts );
            C0 = SKP_LSHIFT32( C0, -rshifts_extra );
        }
        rshifts += rshifts_extra;
    }
    SKP_memset( C_first_row, 0, SKP_Silk_MAX_ORDER_LPC * sizeof( SKP_int32 ) );
    if( rshifts > 0 ) {
        for( s = 0; s < nb_subfr; s++ ) {
            x_ptr = x + s * subfr_length;
            for( n = 1; n < D + 1; n++ ) {
                C_first_row[ n - 1 ] += ( SKP_int32 )SKP_RSHIFT64(
                    SKP_Silk_inner_prod16_aligned_64( x_ptr, x_ptr + n, subfr_length - n ), rshifts );
            }
        }
    } else {
        for( s = 0; s < nb_subfr; s++ ) {
            x_ptr = x + s * subfr_length;
            for( n = 1; n < D + 1; n++ ) {
                C_first_row[ n - 1 ] += SKP_LSHIFT32(
                    SKP_Silk_inner_prod_aligned( x_ptr, x_ptr + n, subfr_length - n ), -rshifts );
            }
        }
    }
    SKP_memcpy( C_last_row, C_first_row, SKP_Silk_MAX_ORDER_LPC * sizeof( SKP_int32 ) );

    /* Initialize */
    CAb[ 0 ] = CAf[ 0 ] = C0 + SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ) + 1;                         // Q(-rshifts)

    for( n = 0; n < D; n++ ) {
        /* Update first row of correlation matrix (without first element) */
        /* Update last row of correlation matrix (without last element, stored in reversed order) */
        /* Update C * Af */
        /* Update C * flipud(Af) (stored in reversed order) */
        if( rshifts > -2 ) {
            for( s = 0; s < nb_subfr; s++ ) {
                x_ptr = x + s * subfr_length;
                x1   = -SKP_LSHIFT32( ( SKP_int32 )x_ptr[ n ],                    16 - rshifts );   // Q(16-rshifts)
                x2   = -SKP_LSHIFT32( ( SKP_int32 )x_ptr[ subfr_length - n - 1 ], 16 - rshifts );   // Q(16-rshifts)
                tmp1 =  SKP_LSHIFT32( ( SKP_int32 )x_ptr[ n ],                    QA - 16 );        // Q(QA-16)
                tmp2 =  SKP_LSHIFT32( ( SKP_int32 )x_ptr[ subfr_length - n - 1 ], QA - 16 );        // Q(QA-16)
                for( k = 0; k < n; k++ ) {
                    C_first_row[ k ] = SKP_SMLAWB( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] );            // Q( -rshifts )
                    C_last_row[ k ]  = SKP_SMLAWB( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] ); // Q( -rshifts )
                    Atmp_QA = Af_QA[ k ];
                    tmp1 = SKP_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ] );                               // Q(QA-16)
                    tmp2 = SKP_SMLAWB( tmp2, Atmp_QA, x_ptr[ subfr_length - n + k ] );                    // Q(QA-16)
                }
                tmp1 = SKP_LSHIFT32( -tmp1, 32 - QA - rshifts );                                          // Q(16-rshifts)
                tmp2 = SKP_LSHIFT32( -tmp2, 32 - QA - rshifts );                                          // Q(16-rshifts)
                for( k = 0; k <= n; k++ ) {
                    CAf[ k ] = SKP_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ] );                              // Q( -rshift )
                    CAb[ k ] = SKP_SMLAWB( CAb[ k ], tmp2, x_ptr[ subfr_length - n + k - 1 ] );           // Q( -rshift )
                }
            }
        } else {
            for( s = 0; s < nb_subfr; s++ ) {
                x_ptr = x + s * subfr_length;
                x1   = -SKP_LSHIFT32( ( SKP_int32 )x_ptr[ n ],                    -rshifts );       // Q( -rshifts )
                x2   = -SKP_LSHIFT32( ( SKP_int32 )x_ptr[ subfr_length - n - 1 ], -rshifts );       // Q( -rshifts )
                tmp1 =  SKP_LSHIFT32( ( SKP_int32 )x_ptr[ n ],                    17 );             // Q17
                tmp2 =  SKP_LSHIFT32( ( SKP_int32 )x_ptr[ subfr_length - n - 1 ], 17 );             // Q17
                for( k = 0; k < n; k++ ) {
                    C_first_row[ k ] = SKP_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] );               // Q( -rshifts )
                    C_last_row[ k ]  = SKP_MLA( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] );    // Q( -rshifts )
                    Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 17 );                                      // Q17
                    tmp1 = SKP_MLA( tmp1, x_ptr[ n - k - 1 ],            Atmp1 );                         // Q17
                    tmp2 = SKP_MLA( tmp2, x_ptr[ subfr_length - n + k ], Atmp1 );                         // Q17
                }
                tmp1 = -tmp1;                                                                             // Q17
                tmp2 = -tmp2;                                                                             // Q17
                for( k = 0; k <= n; k++ ) {
                    CAf[ k ] = SKP_SMLAWW( CAf[ k ], tmp1, SKP_LSHIFT32( ( SKP_int32 )x_ptr[ n - k ], -rshifts - 1 ) );                     // Q( -rshift )
                    CAb[ k ] = SKP_SMLAWW( CAb[ k ], tmp2, SKP_LSHIFT32( ( SKP_int32 )x_ptr[ subfr_length - n + k - 1 ], -rshifts - 1 ) );  // Q( -rshift )
                }
            }
        }

        /* Calculate numerator and denominator for the next order reflection (parcor) coefficient */
        tmp1 = C_first_row[ n ];                                                        // Q( -rshifts )
        tmp2 = C_last_row[ n ];                                                         // Q( -rshifts )
        num  = 0;                                                                       // Q( -rshifts )
        nrg  = SKP_ADD32( CAb[ 0 ], CAf[ 0 ] );                                         // Q( 1-rshifts )
        for( k = 0; k < n; k++ ) {
            Atmp_QA = Af_QA[ k ];
            lz = SKP_Silk_CLZ32( SKP_abs( Atmp_QA ) ) - 1;
            lz = SKP_min( 32 - QA, lz );
            Atmp1 = SKP_LSHIFT32( Atmp_QA, lz );                                        // Q( QA + lz )

            tmp1 = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( C_last_row[ n - k - 1 ],  Atmp1 ), 32 - QA - lz );                    // Q( -rshifts )
            tmp2 = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( C_first_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz );                    // Q( -rshifts )
            num  = SKP_ADD_LSHIFT32( num,  SKP_SMMUL( CAb[ n - k ],             Atmp1 ), 32 - QA - lz );                    // Q( -rshifts )
            nrg  = SKP_ADD_LSHIFT32( nrg,  SKP_SMMUL( SKP_ADD32( CAb[ k + 1 ], CAf[ k + 1 ] ), Atmp1 ), 32 - QA - lz );     // Q( 1-rshifts )
        }
        CAf[ n + 1 ] = tmp1;                                                            // Q( -rshifts )
        CAb[ n + 1 ] = tmp2;                                                            // Q( -rshifts )
        num = SKP_ADD32( num, tmp2 );                                                   // Q( -rshifts )
        num = SKP_LSHIFT32( -num, 1 );                                                  // Q( 1-rshifts )

        /* Calculate the next order reflection (parcor) coefficient */
        if( SKP_abs( num ) < nrg ) {
            rc_Q31 = SKP_DIV32_varQ( num, nrg, 31 );
        } else {
            /* Negative energy or ratio too high; set remaining coefficients to zero and exit loop */
            SKP_memset( &Af_QA[ n ], 0, ( D - n ) * sizeof( SKP_int32 ) );
            SKP_assert( 0 );
            break;
        }

        /* Update the AR coefficients */
        for( k = 0; k < ( n + 1 ) >> 1; k++ ) {
            tmp1 = Af_QA[ k ];                                                          // QA
            tmp2 = Af_QA[ n - k - 1 ];                                                  // QA
            Af_QA[ k ]         = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 );    // QA
            Af_QA[ n - k - 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 );    // QA
        }
        Af_QA[ n ] = SKP_RSHIFT32( rc_Q31, 31 - QA );                                   // QA

        /* Update C * Af and C * Ab */
        for( k = 0; k <= n + 1; k++ ) {
            tmp1 = CAf[ k ];                                                            // Q( -rshifts )
            tmp2 = CAb[ n - k + 1 ];                                                    // Q( -rshifts )
            CAf[ k ]         = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 );      // Q( -rshifts )
            CAb[ n - k + 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 );      // Q( -rshifts )
        }
    }

    /* Return residual energy */
    nrg  = CAf[ 0 ];                                                                    // Q( -rshifts )
    tmp1 = 1 << 16;                                                                     // Q16
    for( k = 0; k < D; k++ ) {
        Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 16 );                                // Q16
        nrg   = SKP_SMLAWW( nrg, CAf[ k + 1 ], Atmp1 );                                 // Q( -rshifts )
        tmp1  = SKP_SMLAWW( tmp1, Atmp1, Atmp1 );                                       // Q16
        A_Q16[ k ] = -Atmp1;
    }
    *res_nrg = SKP_SMLAWW( nrg, SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ), -tmp1 );           // Q( -rshifts )
    *res_nrg_Q = -rshifts;
}
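/* The fixed-point coefficient update inside the loop above mirrors the standard symmetric Burg /
 * Levinson recursion. For reference, the same step in floating point (sketch, not part of the codec):
 * given the new reflection coefficient rc for stage n, update Af[0..n-1] in mirrored pairs and append
 * Af[n] = rc, exactly what the SKP_SMMUL / SKP_ADD_LSHIFT32 code does in the QA domain. */
static void burg_coef_update_ref( double Af[], int n, double rc )
{
    int    k;
    double tmp1, tmp2;

    for( k = 0; k < ( n + 1 ) >> 1; k++ ) {
        tmp1            = Af[ k ];
        tmp2            = Af[ n - k - 1 ];
        Af[ k ]         = tmp1 + rc * tmp2;
        Af[ n - k - 1 ] = tmp2 + rc * tmp1;
    }
    Af[ n ] = rc;
}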
/* Finds LPC vector from correlations, and converts to NLSF */
void SKP_Silk_find_LPC_FIX(
    SKP_int             NLSF_Q15[],             /* O    NLSFs                                                       */
    SKP_int             *interpIndex,           /* O    NLSF interpolation index, only used for NLSF interpolation  */
    const SKP_int       prev_NLSFq_Q15[],       /* I    previous NLSFs, only used for NLSF interpolation            */
    const SKP_int       useInterpolatedNLSFs,   /* I    Flag                                                        */
    const SKP_int       LPC_order,              /* I    LPC order                                                   */
    const SKP_int16     x[],                    /* I    Input signal                                                */
    const SKP_int       subfr_length            /* I    Input signal subframe length including preceding samples    */
)
{
    SKP_int     k;
    SKP_int32   a_Q16[ MAX_LPC_ORDER ];
    SKP_int     isInterpLower, shift;
    SKP_int16   S[ MAX_LPC_ORDER ];
    SKP_int32   res_nrg0, res_nrg1;
    SKP_int     rshift0, rshift1;

    /* Used only for LSF interpolation */
    SKP_int32   a_tmp_Q16[ MAX_LPC_ORDER ], res_nrg_interp, res_nrg, res_tmp_nrg;
    SKP_int     res_nrg_interp_Q, res_nrg_Q, res_tmp_nrg_Q;
    SKP_int16   a_tmp_Q12[ MAX_LPC_ORDER ];
    SKP_int     NLSF0_Q15[ MAX_LPC_ORDER ];
    SKP_int16   LPC_res[ ( MAX_FRAME_LENGTH + NB_SUBFR * MAX_LPC_ORDER ) / 2 ];

    /* Default: no interpolation */
    *interpIndex = 4;

    /* Burg AR analysis for the full frame */
    SKP_Silk_burg_modified( &res_nrg, &res_nrg_Q, a_Q16, x, subfr_length, NB_SUBFR, SKP_FIX_CONST( FIND_LPC_COND_FAC, 32 ), LPC_order );

    SKP_Silk_bwexpander_32( a_Q16, LPC_order, SKP_FIX_CONST( FIND_LPC_CHIRP, 16 ) );

    if( useInterpolatedNLSFs == 1 ) {
        /* Optimal solution for last 10 ms */
        SKP_Silk_burg_modified( &res_tmp_nrg, &res_tmp_nrg_Q, a_tmp_Q16, x + ( NB_SUBFR >> 1 ) * subfr_length,
            subfr_length, ( NB_SUBFR >> 1 ), SKP_FIX_CONST( FIND_LPC_COND_FAC, 32 ), LPC_order );

        SKP_Silk_bwexpander_32( a_tmp_Q16, LPC_order, SKP_FIX_CONST( FIND_LPC_CHIRP, 16 ) );

        /* subtract residual energy here, as that's easier than adding it to the    */
        /* residual energy of the first 10 ms in each iteration of the search below */
        shift = res_tmp_nrg_Q - res_nrg_Q;
        if( shift >= 0 ) {
            if( shift < 32 ) {
                res_nrg = res_nrg - SKP_RSHIFT( res_tmp_nrg, shift );
            }
        } else {
            SKP_assert( shift > -32 );
            res_nrg   = SKP_RSHIFT( res_nrg, -shift ) - res_tmp_nrg;
            res_nrg_Q = res_tmp_nrg_Q;
        }

        /* Convert to NLSFs */
        SKP_Silk_A2NLSF( NLSF_Q15, a_tmp_Q16, LPC_order );

        /* Search over interpolation indices to find the one with lowest residual energy */
        for( k = 3; k >= 0; k-- ) {
            /* Interpolate NLSFs for first half */
            SKP_Silk_interpolate( NLSF0_Q15, prev_NLSFq_Q15, NLSF_Q15, k, LPC_order );

            /* Convert to LPC for residual energy evaluation */
            SKP_Silk_NLSF2A_stable( a_tmp_Q12, NLSF0_Q15, LPC_order );

            /* Calculate residual energy with NLSF interpolation */
            SKP_memset( S, 0, LPC_order * sizeof( SKP_int16 ) );
            SKP_Silk_LPC_analysis_filter( x, a_tmp_Q12, S, LPC_res, 2 * subfr_length, LPC_order );

            SKP_Silk_sum_sqr_shift( &res_nrg0, &rshift0, LPC_res + LPC_order,                subfr_length - LPC_order );
            SKP_Silk_sum_sqr_shift( &res_nrg1, &rshift1, LPC_res + LPC_order + subfr_length, subfr_length - LPC_order );

            /* Add subframe energies from first half frame */
            shift = rshift0 - rshift1;
            if( shift >= 0 ) {
                res_nrg1 = SKP_RSHIFT( res_nrg1, shift );
                res_nrg_interp_Q = -rshift0;
            } else {
                res_nrg0 = SKP_RSHIFT( res_nrg0, -shift );
                res_nrg_interp_Q = -rshift1;
            }
            res_nrg_interp = SKP_ADD32( res_nrg0, res_nrg1 );

            /* Compare with first half energy without NLSF interpolation, or best interpolated value so far */
            shift = res_nrg_interp_Q - res_nrg_Q;
            if( shift >= 0 ) {
                if( SKP_RSHIFT( res_nrg_interp, shift ) < res_nrg ) {
                    isInterpLower = SKP_TRUE;
                } else {
                    isInterpLower = SKP_FALSE;
                }
            } else {
                if( -shift < 32 ) {
                    if( res_nrg_interp < SKP_RSHIFT( res_nrg, -shift ) ) {
                        isInterpLower = SKP_TRUE;
                    } else {
                        isInterpLower = SKP_FALSE;
                    }
                } else {
                    isInterpLower = SKP_FALSE;
                }
            }

            /* Determine whether current interpolated NLSFs are best so far */
            if( isInterpLower == SKP_TRUE ) {
                /* Interpolation has lower residual energy */
                res_nrg   = res_nrg_interp;
                res_nrg_Q = res_nrg_interp_Q;
                *interpIndex = k;
            }
        }
    }
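/* SKP_Silk_bwexpander_32(), used above with FIND_LPC_CHIRP, applies the usual bandwidth expansion
 * ("chirping"): coefficient i is scaled by gamma^(i+1), with gamma in Q16. The sketch below shows that
 * assumed behaviour in plain Q16 arithmetic; it is illustrative, not the library routine. */
#include <stdint.h>

static void bwexpander_32_sketch( int32_t *ar_Q16, int order, int32_t gamma_Q16 )
{
    int     i;
    int32_t chirp_Q16 = gamma_Q16;

    for( i = 0; i < order; i++ ) {
        ar_Q16[ i ] = (int32_t)( ( (int64_t)ar_Q16[ i ] * chirp_Q16 ) >> 16 );  /* ar[i] *= gamma^(i+1) */
        chirp_Q16   = (int32_t)( ( (int64_t)chirp_Q16 * gamma_Q16 ) >> 16 );
    }
}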
void SKP_Silk_noise_shape_analysis_FIX(
    SKP_Silk_encoder_state_FIX      *psEnc,         /* I/O  Encoder state FIX                               */
    SKP_Silk_encoder_control_FIX    *psEncCtrl,     /* I/O  Encoder control FIX                             */
    const SKP_int16                 *pitch_res,     /* I    LPC residual from pitch analysis                */
    const SKP_int16                 *x              /* I    Input signal [ 2 * frame_length + la_shape ]    */
)
{
    SKP_Silk_shape_state_FIX *psShapeSt = &psEnc->sShape;
    SKP_int     k, nSamples, lz, Qnrg, b_Q14, scale = 0, sz;
    SKP_int32   SNR_adj_dB_Q7, HarmBoost_Q16, HarmShapeGain_Q16, Tilt_Q16, tmp32;
    SKP_int32   nrg, pre_nrg_Q30, log_energy_Q7, log_energy_prev_Q7, energy_variation_Q7;
    SKP_int32   delta_Q16, BWExp1_Q16, BWExp2_Q16, gain_mult_Q16, gain_add_Q16, strength_Q16, b_Q8;
    SKP_int32   auto_corr[ SHAPE_LPC_ORDER_MAX + 1 ];
    SKP_int32   refl_coef_Q16[ SHAPE_LPC_ORDER_MAX ];
    SKP_int32   AR_Q24[ SHAPE_LPC_ORDER_MAX ];
    SKP_int16   x_windowed[ SHAPE_LPC_WIN_MAX ];
    const SKP_int16 *x_ptr, *pitch_res_ptr;
    SKP_int32   sqrt_nrg[ NB_SUBFR ], Qnrg_vec[ NB_SUBFR ];

    /* Point to start of first LPC analysis block */
    x_ptr = x + psEnc->sCmn.la_shape - SKP_SMULBB( SHAPE_LPC_WIN_MS, psEnc->sCmn.fs_kHz ) + psEnc->sCmn.frame_length / NB_SUBFR;

    /****************/
    /* CONTROL SNR  */
    /****************/
    /* Reduce SNR_dB values if recent bitstream has exceeded TargetRate */
    psEncCtrl->current_SNR_dB_Q7 = psEnc->SNR_dB_Q7 - SKP_SMULWB( SKP_LSHIFT( ( SKP_int32 )psEnc->BufferedInChannel_ms, 7 ), 3277 );

    /* Reduce SNR_dB if inband FEC used */
    if( psEnc->speech_activity_Q8 > LBRR_SPEECH_ACTIVITY_THRES_Q8 ) {
        psEncCtrl->current_SNR_dB_Q7 -= SKP_RSHIFT( psEnc->inBandFEC_SNR_comp_Q8, 1 );
    }

    /****************/
    /* GAIN CONTROL */
    /****************/
    /* Input quality is the average of the quality in the lowest two VAD bands */
    psEncCtrl->input_quality_Q14 = ( SKP_int )SKP_RSHIFT( ( SKP_int32 )psEncCtrl->input_quality_bands_Q15[ 0 ]
        + psEncCtrl->input_quality_bands_Q15[ 1 ], 2 );

    /* Coding quality level, between 0.0_Q0 and 1.0_Q0, but in Q14 */
    psEncCtrl->coding_quality_Q14 = SKP_RSHIFT( SKP_Silk_sigm_Q15( SKP_RSHIFT_ROUND( psEncCtrl->current_SNR_dB_Q7 - ( 18 << 7 ), 4 ) ), 1 );

    /* Reduce coding SNR during low speech activity */
    b_Q8 = ( 1 << 8 ) - psEnc->speech_activity_Q8;
    b_Q8 = SKP_SMULWB( SKP_LSHIFT( b_Q8, 8 ), b_Q8 );
    SNR_adj_dB_Q7 = SKP_SMLAWB( psEncCtrl->current_SNR_dB_Q7,
        SKP_SMULBB( -BG_SNR_DECR_dB_Q7 >> ( 4 + 1 ), b_Q8 ),                                        // Q11
        SKP_SMULWB( ( 1 << 14 ) + psEncCtrl->input_quality_Q14, psEncCtrl->coding_quality_Q14 ) );  // Q12

    if( psEncCtrl->sCmn.sigtype == SIG_TYPE_VOICED ) {
        /* Reduce gains for periodic signals */
        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7, HARM_SNR_INCR_dB_Q7 << 1, psEnc->LTPCorr_Q15 );
    } else {
        /* For unvoiced signals and low-quality input, adjust the quality slower than SNR_dB setting */
        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7,
            SKP_SMLAWB( 6 << ( 7 + 2 ), -104856, psEncCtrl->current_SNR_dB_Q7 ),    // -104856_Q18 = -0.4_Q0, Q9
            ( 1 << 14 ) - psEncCtrl->input_quality_Q14 );                           // Q14
    }

    /*************************/
    /* SPARSENESS PROCESSING */
    /*************************/
    /* Set quantizer offset */
    if( psEncCtrl->sCmn.sigtype == SIG_TYPE_VOICED ) {
        /* Initially set to 0; may be overruled in process_gains(..) */
        psEncCtrl->sCmn.QuantOffsetType = 0;
        psEncCtrl->sparseness_Q8 = 0;
    } else {
        /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */
        nSamples = SKP_LSHIFT( psEnc->sCmn.fs_kHz, 1 );
        energy_variation_Q7 = 0;
        log_energy_prev_Q7  = 0;
        pitch_res_ptr = pitch_res;
        for( k = 0; k < FRAME_LENGTH_MS / 2; k++ ) {
            SKP_Silk_sum_sqr_shift( &nrg, &scale, pitch_res_ptr, nSamples );
            nrg += SKP_RSHIFT( nSamples, scale );           // Q(-scale)

            log_energy_Q7 = SKP_Silk_lin2log( nrg );
            if( k > 0 ) {
                energy_variation_Q7 += SKP_abs( log_energy_Q7 - log_energy_prev_Q7 );
            }
            log_energy_prev_Q7 = log_energy_Q7;
            pitch_res_ptr += nSamples;
        }

        psEncCtrl->sparseness_Q8 = SKP_RSHIFT( SKP_Silk_sigm_Q15( SKP_SMULWB( energy_variation_Q7 - ( 5 << 7 ), 6554 ) ), 7 ); // 6554_Q16 = 0.1_Q0

        /* Set quantization offset depending on sparseness measure */
        if( psEncCtrl->sparseness_Q8 > SPARSENESS_THRESHOLD_QNT_OFFSET_Q8 ) {
            psEncCtrl->sCmn.QuantOffsetType = 0;
        } else {
            psEncCtrl->sCmn.QuantOffsetType = 1;
        }

        /* Increase coding SNR for sparse signals */
        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7, SPARSE_SNR_INCR_dB_Q7 << 8, psEncCtrl->sparseness_Q8 - ( 1 << 7 ) );
    }

    /*******************************/
    /* Control bandwidth expansion */
    /*******************************/
    delta_Q16  = SKP_SMULWB( ( 1 << 16 ) - SKP_SMULBB( 3, psEncCtrl->coding_quality_Q14 ), LOW_RATE_BANDWIDTH_EXPANSION_DELTA_Q16 );
    BWExp1_Q16 = BANDWIDTH_EXPANSION_Q16 - delta_Q16;
    BWExp2_Q16 = BANDWIDTH_EXPANSION_Q16 + delta_Q16;
    if( psEnc->sCmn.fs_kHz == 24 ) {
        /* Less bandwidth expansion for super wideband */
        BWExp1_Q16 = ( 1 << 16 ) - SKP_SMULWB( SWB_BANDWIDTH_EXPANSION_REDUCTION_Q16, ( 1 << 16 ) - BWExp1_Q16 );
        BWExp2_Q16 = ( 1 << 16 ) - SKP_SMULWB( SWB_BANDWIDTH_EXPANSION_REDUCTION_Q16, ( 1 << 16 ) - BWExp2_Q16 );
    }
    /* BWExp1 will be applied after BWExp2, so make it relative */
    BWExp1_Q16 = SKP_DIV32_16( SKP_LSHIFT( BWExp1_Q16, 14 ), SKP_RSHIFT( BWExp2_Q16, 2 ) );

    /********************************************/
    /* Compute noise shaping AR coefs and gains */
    /********************************************/
    sz = ( SKP_int )SKP_SMULBB( SHAPE_LPC_WIN_MS, psEnc->sCmn.fs_kHz );
    for( k = 0; k < NB_SUBFR; k++ ) {
        /* Apply window */
        SKP_Silk_apply_sine_window( x_windowed, x_ptr, 0, SHAPE_LPC_WIN_MS * psEnc->sCmn.fs_kHz );

        /* Update pointer: next LPC analysis block */
        x_ptr += psEnc->sCmn.frame_length / NB_SUBFR;

        /* Calculate auto correlation */
        SKP_Silk_autocorr( auto_corr, &scale, x_windowed, sz, psEnc->sCmn.shapingLPCOrder + 1 );

        /* Add white noise, as a fraction of energy */
        auto_corr[ 0 ] = SKP_ADD32( auto_corr[ 0 ], SKP_max_32( SKP_SMULWB( SKP_RSHIFT( auto_corr[ 0 ], 4 ), SHAPE_WHITE_NOISE_FRACTION_Q20 ), 1 ) );

        /* Calculate the reflection coefficients using schur */
        nrg = SKP_Silk_schur64( refl_coef_Q16, auto_corr, psEnc->sCmn.shapingLPCOrder );

        /* Convert reflection coefficients to prediction coefficients */
        SKP_Silk_k2a_Q16( AR_Q24, refl_coef_Q16, psEnc->sCmn.shapingLPCOrder );

        /* Bandwidth expansion for synthesis filter shaping */
        SKP_Silk_bwexpander_32( AR_Q24, psEnc->sCmn.shapingLPCOrder, BWExp2_Q16 );

        /* Make sure to fit in Q13 SKP_int16 */
        SKP_Silk_LPC_fit( &psEncCtrl->AR2_Q13[ k * SHAPE_LPC_ORDER_MAX ], AR_Q24, 13, psEnc->sCmn.shapingLPCOrder );

        /* Compute noise shaping filter coefficients */
        SKP_memcpy(
            &psEncCtrl->AR1_Q13[ k * SHAPE_LPC_ORDER_MAX ],
            &psEncCtrl->AR2_Q13[ k * SHAPE_LPC_ORDER_MAX ],
            psEnc->sCmn.shapingLPCOrder * sizeof( SKP_int16 ) );

        /* Bandwidth expansion for analysis filter shaping */
        SKP_assert( BWExp1_Q16 <= ( 1 << 16 ) ); // If ever breaking, use LPC_stabilize() in these cases to stay within range
        SKP_Silk_bwexpander( &psEncCtrl->AR1_Q13[ k * SHAPE_LPC_ORDER_MAX ], psEnc->sCmn.shapingLPCOrder, BWExp1_Q16 );

        /* Increase residual energy */
        nrg = SKP_SMLAWB( nrg, SKP_RSHIFT( auto_corr[ 0 ], 8 ), SHAPE_MIN_ENERGY_RATIO_Q24 );

        Qnrg = -scale;          // range: -12...30
        SKP_assert( Qnrg >= -12 );
        SKP_assert( Qnrg <= 30 );

        /* Make sure that Qnrg is an even number */
        if( Qnrg & 1 ) {
            Qnrg -= 1;
            nrg >>= 1;
        }

        tmp32 = SKP_Silk_SQRT_APPROX( nrg );
        Qnrg >>= 1;             // range: -6...15

        sqrt_nrg[ k ] = tmp32;
        Qnrg_vec[ k ] = Qnrg;

        psEncCtrl->Gains_Q16[ k ] = SKP_LSHIFT_SAT32( tmp32, 16 - Qnrg );

        /* Ratio of prediction gains, in energy domain */
        SKP_Silk_LPC_inverse_pred_gain_Q13( &pre_nrg_Q30, &psEncCtrl->AR2_Q13[ k * SHAPE_LPC_ORDER_MAX ], psEnc->sCmn.shapingLPCOrder );
        SKP_Silk_LPC_inverse_pred_gain_Q13( &nrg,         &psEncCtrl->AR1_Q13[ k * SHAPE_LPC_ORDER_MAX ], psEnc->sCmn.shapingLPCOrder );

        lz = SKP_min_32( SKP_Silk_CLZ32( pre_nrg_Q30 ) - 1, 19 );
        pre_nrg_Q30 = SKP_DIV32( SKP_LSHIFT( pre_nrg_Q30, lz ), SKP_RSHIFT( nrg, 20 - lz ) + 1 );   // Q20
        pre_nrg_Q30 = SKP_RSHIFT( SKP_LSHIFT_SAT32( pre_nrg_Q30, 9 ), 1 );                          /* Q28 */
        psEncCtrl->GainsPre_Q14[ k ] = ( SKP_int )SKP_Silk_SQRT_APPROX( pre_nrg_Q30 );
    }
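/* The last few statements above form GainsPre_Q14 as the square root of the ratio of the two inverse
 * prediction gains: the division yields the energy-domain ratio in Q20, the shift takes it to Q28, and
 * the square root of a Q28 value lands in Q14. In floating point the same computation is simply the
 * following (sketch, positive inputs assumed): */
#include <math.h>

static int gains_pre_Q14_sketch( double pre_nrg, double nrg )
{
    return (int)( sqrt( pre_nrg / nrg ) * 16384.0 + 0.5 );     /* amplitude gain in Q14 */
}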