/* coefficient in an array of coefficients, for monic filters. */
static inline opus_int32 warped_gain( /* gain in Q16 */
    const opus_int32    *coefs_Q24,
    opus_int            lambda_Q16,
    opus_int            order
) {
    opus_int   i;
    opus_int32 gain_Q24;

    lambda_Q16 = -lambda_Q16;
    gain_Q24 = coefs_Q24[ order - 1 ];
    for( i = order - 2; i >= 0; i-- ) {
        gain_Q24 = silk_SMLAWB( coefs_Q24[ i ], gain_Q24, lambda_Q16 );
    }
    gain_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), gain_Q24, -lambda_Q16 );
    return silk_INVERSE32_varQ( gain_Q24, 40 );
}
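/* Editorial sketch, not part of the original source: the same recursion as warped_gain()
   above in plain floating point, to make the fixed-point version easier to follow. The loop
   is a Horner evaluation of the monic warped-filter polynomial at the warping coefficient
   lambda; the function returns the reciprocal of that value, which is what
   silk_INVERSE32_varQ( gain_Q24, 40 ) produces in Q16 above. The _sketch name is ours. */
static inline double warped_gain_sketch(
    const double    *coefs,     /* normalized coefficients (Q24 values divided by 2^24) */
    double          lambda,     /* warping coefficient                                  */
    int             order
) {
    int    i;
    double gain = coefs[ order - 1 ];

    for( i = order - 2; i >= 0; i-- ) {
        gain = coefs[ i ] - lambda * gain;  /* Horner step, mirrors the silk_SMLAWB() loop */
    }
    return 1.0 / ( 1.0 + lambda * gain );   /* mirrors the final silk_SMLAWB() and silk_INVERSE32_varQ() */
}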
static OPUS_INLINE void silk_PLC_conceal(
    silk_decoder_state              *psDec,             /* I/O Decoder state        */
    silk_decoder_control            *psDecCtrl,         /* I/O Decoder control      */
    opus_int16                      frame[],            /* O LPC residual signal    */
    int                             arch                /* I Run-time architecture  */
)
{
    opus_int   i, j, k;
    opus_int   lag, idx, sLTP_buf_idx, shift1, shift2;
    opus_int32 rand_seed, harm_Gain_Q15, rand_Gain_Q15, inv_gain_Q30;
    opus_int32 energy1, energy2, *rand_ptr, *pred_lag_ptr;
    opus_int32 LPC_pred_Q10, LTP_pred_Q12;
    opus_int16 rand_scale_Q14;
    opus_int16 *B_Q14;
    opus_int32 *sLPC_Q14_ptr;
    opus_int16 A_Q12[ MAX_LPC_ORDER ];
#ifdef SMALL_FOOTPRINT
    opus_int16 *sLTP;
#else
    VARDECL( opus_int16, sLTP );
#endif
    VARDECL( opus_int32, sLTP_Q14 );
    silk_PLC_struct *psPLC = &psDec->sPLC;
    opus_int32 prevGain_Q10[2];
    SAVE_STACK;

    ALLOC( sLTP_Q14, psDec->ltp_mem_length + psDec->frame_length, opus_int32 );
#ifdef SMALL_FOOTPRINT
    /* Ugly hack that breaks aliasing rules to save stack: put sLTP at the very end of sLTP_Q14. */
    sLTP = ( (opus_int16*)&sLTP_Q14[ psDec->ltp_mem_length + psDec->frame_length ] ) - psDec->ltp_mem_length;
#else
    ALLOC( sLTP, psDec->ltp_mem_length, opus_int16 );
#endif

    prevGain_Q10[0] = silk_RSHIFT( psPLC->prevGain_Q16[ 0 ], 6 );
    prevGain_Q10[1] = silk_RSHIFT( psPLC->prevGain_Q16[ 1 ], 6 );

    if( psDec->first_frame_after_reset ) {
        silk_memset( psPLC->prevLPC_Q12, 0, sizeof( psPLC->prevLPC_Q12 ) );
    }

    silk_PLC_energy( &energy1, &shift1, &energy2, &shift2, psDec->exc_Q14, prevGain_Q10, psDec->subfr_length, psDec->nb_subfr );

    if( silk_RSHIFT( energy1, shift2 ) < silk_RSHIFT( energy2, shift1 ) ) {
        /* First sub-frame has lowest energy */
        rand_ptr = &psDec->exc_Q14[ silk_max_int( 0, ( psPLC->nb_subfr - 1 ) * psPLC->subfr_length - RAND_BUF_SIZE ) ];
    } else {
        /* Second sub-frame has lowest energy */
        rand_ptr = &psDec->exc_Q14[ silk_max_int( 0, psPLC->nb_subfr * psPLC->subfr_length - RAND_BUF_SIZE ) ];
    }

    /* Set up Gain to random noise component */
    B_Q14          = psPLC->LTPCoef_Q14;
    rand_scale_Q14 = psPLC->randScale_Q14;

    /* Set up attenuation gains */
    harm_Gain_Q15 = HARM_ATT_Q15[ silk_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    if( psDec->prevSignalType == TYPE_VOICED ) {
        rand_Gain_Q15 = PLC_RAND_ATTENUATE_V_Q15[ silk_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    } else {
        rand_Gain_Q15 = PLC_RAND_ATTENUATE_UV_Q15[ silk_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    }

    /* LPC concealment. Apply BWE to previous LPC */
    silk_bwexpander( psPLC->prevLPC_Q12, psDec->LPC_order, SILK_FIX_CONST( BWE_COEF, 16 ) );

    /* Preload LPC coefficients to array on stack. Gives small performance gain */
    silk_memcpy( A_Q12, psPLC->prevLPC_Q12, psDec->LPC_order * sizeof( opus_int16 ) );

    /* First lost frame */
    if( psDec->lossCnt == 0 ) {
        rand_scale_Q14 = 1 << 14;

        /* Reduce random noise Gain for voiced frames */
        if( psDec->prevSignalType == TYPE_VOICED ) {
            for( i = 0; i < LTP_ORDER; i++ ) {
                rand_scale_Q14 -= B_Q14[ i ];
            }
            rand_scale_Q14 = silk_max_16( 3277, rand_scale_Q14 ); /* 0.2 */
            rand_scale_Q14 = (opus_int16)silk_RSHIFT( silk_SMULBB( rand_scale_Q14, psPLC->prevLTP_scale_Q14 ), 14 );
        } else {
            /* Reduce random noise for unvoiced frames with high LPC gain */
            opus_int32 invGain_Q30, down_scale_Q30;

            invGain_Q30 = silk_LPC_inverse_pred_gain( psPLC->prevLPC_Q12, psDec->LPC_order, arch );

            down_scale_Q30 = silk_min_32( silk_RSHIFT( (opus_int32)1 << 30, LOG2_INV_LPC_GAIN_HIGH_THRES ), invGain_Q30 );
            down_scale_Q30 = silk_max_32( silk_RSHIFT( (opus_int32)1 << 30, LOG2_INV_LPC_GAIN_LOW_THRES ), down_scale_Q30 );
            down_scale_Q30 = silk_LSHIFT( down_scale_Q30, LOG2_INV_LPC_GAIN_HIGH_THRES );

            rand_Gain_Q15 = silk_RSHIFT( silk_SMULWB( down_scale_Q30, rand_Gain_Q15 ), 14 );
        }
    }

    rand_seed    = psPLC->rand_seed;
    lag          = silk_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
    sLTP_buf_idx = psDec->ltp_mem_length;

    /* Rewhiten LTP state */
    idx = psDec->ltp_mem_length - lag - psDec->LPC_order - LTP_ORDER / 2;
    silk_assert( idx > 0 );
    silk_LPC_analysis_filter( &sLTP[ idx ], &psDec->outBuf[ idx ], A_Q12, psDec->ltp_mem_length - idx, psDec->LPC_order, arch );
    /* Scale LTP state */
    inv_gain_Q30 = silk_INVERSE32_varQ( psPLC->prevGain_Q16[ 1 ], 46 );
    inv_gain_Q30 = silk_min( inv_gain_Q30, silk_int32_MAX >> 1 );
    for( i = idx + psDec->LPC_order; i < psDec->ltp_mem_length; i++ ) {
        sLTP_Q14[ i ] = silk_SMULWB( inv_gain_Q30, sLTP[ i ] );
    }

    /***************************/
    /* LTP synthesis filtering */
    /***************************/
    for( k = 0; k < psDec->nb_subfr; k++ ) {
        /* Set up pointer */
        pred_lag_ptr = &sLTP_Q14[ sLTP_buf_idx - lag + LTP_ORDER / 2 ];
        for( i = 0; i < psDec->subfr_length; i++ ) {
            /* Unrolled loop */
            /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */
            LTP_pred_Q12 = 2;
            LTP_pred_Q12 = silk_SMLAWB( LTP_pred_Q12, pred_lag_ptr[  0 ], B_Q14[ 0 ] );
            LTP_pred_Q12 = silk_SMLAWB( LTP_pred_Q12, pred_lag_ptr[ -1 ], B_Q14[ 1 ] );
            LTP_pred_Q12 = silk_SMLAWB( LTP_pred_Q12, pred_lag_ptr[ -2 ], B_Q14[ 2 ] );
            LTP_pred_Q12 = silk_SMLAWB( LTP_pred_Q12, pred_lag_ptr[ -3 ], B_Q14[ 3 ] );
            LTP_pred_Q12 = silk_SMLAWB( LTP_pred_Q12, pred_lag_ptr[ -4 ], B_Q14[ 4 ] );
            pred_lag_ptr++;

            /* Generate LPC excitation */
            rand_seed = silk_RAND( rand_seed );
            idx = silk_RSHIFT( rand_seed, 25 ) & RAND_BUF_MASK;
            sLTP_Q14[ sLTP_buf_idx ] = silk_LSHIFT32( silk_SMLAWB( LTP_pred_Q12, rand_ptr[ idx ], rand_scale_Q14 ), 2 );
            sLTP_buf_idx++;
        }

        /* Gradually reduce LTP gain */
        for( j = 0; j < LTP_ORDER; j++ ) {
            B_Q14[ j ] = silk_RSHIFT( silk_SMULBB( harm_Gain_Q15, B_Q14[ j ] ), 15 );
        }
        if( psDec->indices.signalType != TYPE_NO_VOICE_ACTIVITY ) {
            /* Gradually reduce excitation gain */
            rand_scale_Q14 = silk_RSHIFT( silk_SMULBB( rand_scale_Q14, rand_Gain_Q15 ), 15 );
        }

        /* Slowly increase pitch lag */
        psPLC->pitchL_Q8 = silk_SMLAWB( psPLC->pitchL_Q8, psPLC->pitchL_Q8, PITCH_DRIFT_FAC_Q16 );
        psPLC->pitchL_Q8 = silk_min_32( psPLC->pitchL_Q8, silk_LSHIFT( silk_SMULBB( MAX_PITCH_LAG_MS, psDec->fs_kHz ), 8 ) );
        lag = silk_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
    }

    /***************************/
    /* LPC synthesis filtering */
    /***************************/
    sLPC_Q14_ptr = &sLTP_Q14[ psDec->ltp_mem_length - MAX_LPC_ORDER ];

    /* Copy LPC state */
    silk_memcpy( sLPC_Q14_ptr, psDec->sLPC_Q14_buf, MAX_LPC_ORDER * sizeof( opus_int32 ) );

    silk_assert( psDec->LPC_order >= 10 ); /* check that unrolling works */
    for( i = 0; i < psDec->frame_length; i++ ) {
        /* partly unrolled */
        /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */
        LPC_pred_Q10 = silk_RSHIFT( psDec->LPC_order, 1 );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  1 ], A_Q12[ 0 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  2 ], A_Q12[ 1 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  3 ], A_Q12[ 2 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  4 ], A_Q12[ 3 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  5 ], A_Q12[ 4 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  6 ], A_Q12[ 5 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  7 ], A_Q12[ 6 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  8 ], A_Q12[ 7 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  9 ], A_Q12[ 8 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i - 10 ], A_Q12[ 9 ] );
        for( j = 10; j < psDec->LPC_order; j++ ) {
            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i - j - 1 ], A_Q12[ j ] );
        }

        /* Add prediction to LPC excitation */
        sLPC_Q14_ptr[ MAX_LPC_ORDER + i ] = silk_ADD_SAT32( sLPC_Q14_ptr[ MAX_LPC_ORDER + i ],
                                            silk_LSHIFT_SAT32( LPC_pred_Q10, 4 ) );

        /* Scale with Gain */
        frame[ i ] = (opus_int16)silk_SAT16( silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( sLPC_Q14_ptr[ MAX_LPC_ORDER + i ], prevGain_Q10[ 1 ] ), 8 ) ) );
    }

    /* Save LPC state */
    silk_memcpy( psDec->sLPC_Q14_buf, &sLPC_Q14_ptr[ psDec->frame_length ], MAX_LPC_ORDER * sizeof( opus_int32 ) );

    /**************************************/
    /* Update states                      */
    /**************************************/
    psPLC->rand_seed     = rand_seed;
    psPLC->randScale_Q14 = rand_scale_Q14;
    for( i = 0; i < MAX_NB_SUBFR; i++ ) {
        psDecCtrl->pitchL[ i ] = lag;
    }
    RESTORE_STACK;
}
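/* Editorial sketch, not part of the original source: the prediction loops above lean on
   silk_SMLAWB( a, b, c ), whose generic C definition amounts to a 32x16 fractional
   multiply-accumulate, a + ( b * (opus_int16)c ) >> 16, with the shift truncating toward
   minus infinity. Each MAC therefore loses up to one LSB, which is why the accumulators are
   seeded with a small positive constant above ( LTP_pred_Q12 = 2 for the 5-tap LTP sum,
   LPC_pred_Q10 = LPC_order / 2 for the LPC sum ) instead of 0: the seed offsets the expected
   truncation bias of the unrolled sum. The helper below is only an emulation for reference
   (overflow behaviour aside); the _sketch name is ours. */
static inline opus_int32 smlawb_sketch( opus_int32 a32, opus_int32 b32, opus_int32 c32 )
{
    /* a32 + ( b32 * bottom 16 bits of c32, treated as signed ) / 2^16, truncated toward -inf */
    return (opus_int32)( a32 + ( ( (opus_int64)b32 * (opus_int16)c32 ) >> 16 ) );
}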
/* amplitude of monic warped coefficients by using bandwidth expansion on the true coefficients */
static inline void limit_warped_coefs(
    opus_int32          *coefs_syn_Q24,
    opus_int32          *coefs_ana_Q24,
    opus_int            lambda_Q16,
    opus_int32          limit_Q24,
    opus_int            order
) {
    opus_int   i, iter, ind = 0;
    opus_int32 tmp, maxabs_Q24, chirp_Q16, gain_syn_Q16, gain_ana_Q16;
    opus_int32 nom_Q16, den_Q24;

    /* Convert to monic coefficients */
    lambda_Q16 = -lambda_Q16;
    for( i = order - 1; i > 0; i-- ) {
        coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 );
        coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 );
    }
    lambda_Q16 = -lambda_Q16;
    nom_Q16 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -lambda_Q16, lambda_Q16 );
    den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 );
    gain_syn_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 );
    den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 );
    gain_ana_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 );
    for( i = 0; i < order; i++ ) {
        coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] );
        coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] );
    }

    for( iter = 0; iter < 10; iter++ ) {
        /* Find maximum absolute value */
        maxabs_Q24 = -1;
        for( i = 0; i < order; i++ ) {
            tmp = silk_max( silk_abs_int32( coefs_syn_Q24[ i ] ), silk_abs_int32( coefs_ana_Q24[ i ] ) );
            if( tmp > maxabs_Q24 ) {
                maxabs_Q24 = tmp;
                ind = i;
            }
        }
        if( maxabs_Q24 <= limit_Q24 ) {
            /* Coefficients are within range - done */
            return;
        }

        /* Convert back to true warped coefficients */
        for( i = 1; i < order; i++ ) {
            coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 );
            coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 );
        }
        gain_syn_Q16 = silk_INVERSE32_varQ( gain_syn_Q16, 32 );
        gain_ana_Q16 = silk_INVERSE32_varQ( gain_ana_Q16, 32 );
        for( i = 0; i < order; i++ ) {
            coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] );
            coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] );
        }

        /* Apply bandwidth expansion */
        chirp_Q16 = SILK_FIX_CONST( 0.99, 16 ) - silk_DIV32_varQ(
            silk_SMULWB( maxabs_Q24 - limit_Q24,
                silk_SMLABB( SILK_FIX_CONST( 0.8, 10 ), SILK_FIX_CONST( 0.1, 10 ), iter ) ),
            silk_MUL( maxabs_Q24, ind + 1 ), 22 );
        silk_bwexpander_32( coefs_syn_Q24, order, chirp_Q16 );
        silk_bwexpander_32( coefs_ana_Q24, order, chirp_Q16 );

        /* Convert to monic warped coefficients */
        lambda_Q16 = -lambda_Q16;
        for( i = order - 1; i > 0; i-- ) {
            coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 );
            coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 );
        }
        lambda_Q16 = -lambda_Q16;
        nom_Q16 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -lambda_Q16, lambda_Q16 );
        den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 );
        gain_syn_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 );
        den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 );
        gain_ana_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 );
        for( i = 0; i < order; i++ ) {
            coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] );
            coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] );
        }
    }
    silk_assert( 0 );
}
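/* Editorial sketch, not part of the original source: silk_bwexpander_32(), called above to
   apply bandwidth expansion, effectively scales coefficient i by chirp^( i + 1 ) with
   0 < chirp < 1, pulling the filter roots toward the origin. A plain floating-point
   rendering of that operation (the _sketch name is ours): */
static inline void bwexpander_sketch(
    double          *ar,        /* I/O  AR coefficients                            */
    int             order,      /* I    Number of coefficients                     */
    double          chirp       /* I    Chirp (bandwidth expansion) factor, 0..1   */
) {
    int    i;
    double c = chirp;

    for( i = 0; i < order; i++ ) {
        ar[ i ] *= c;           /* coefficient i ends up scaled by chirp^( i + 1 ) */
        c       *= chirp;
    }
}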
static OPUS_INLINE void silk_LDL_factorize_FIX(
    opus_int32          *A,         /* I/O Pointer to Symmetric Square Matrix                        */
    opus_int            M,          /* I   Size of Matrix                                            */
    opus_int32          *L_Q16,     /* I/O Pointer to Square Upper triangular Matrix                 */
    inv_D_t             *inv_D      /* I/O Pointer to vector holding inverted diagonal elements of D */
)
{
    opus_int   i, j, k, status, loop_count;
    const opus_int32 *ptr1, *ptr2;
    opus_int32 diag_min_value, tmp_32, err;
    opus_int32 v_Q0[ MAX_MATRIX_SIZE ], D_Q0[ MAX_MATRIX_SIZE ];
    opus_int32 one_div_diag_Q36, one_div_diag_Q40, one_div_diag_Q48;

    silk_assert( M <= MAX_MATRIX_SIZE );

    status = 1;
    diag_min_value = silk_max_32( silk_SMMUL( silk_ADD_SAT32( A[ 0 ], A[ silk_SMULBB( M, M ) - 1 ] ),
                                  SILK_FIX_CONST( FIND_LTP_COND_FAC, 31 ) ), 1 << 9 );
    for( loop_count = 0; loop_count < M && status == 1; loop_count++ ) {
        status = 0;
        for( j = 0; j < M; j++ ) {
            ptr1 = matrix_adr( L_Q16, j, 0, M );
            tmp_32 = 0;
            for( i = 0; i < j; i++ ) {
                v_Q0[ i ] = silk_SMULWW(         D_Q0[ i ], ptr1[ i ] ); /* Q0 */
                tmp_32    = silk_SMLAWW( tmp_32, v_Q0[ i ], ptr1[ i ] ); /* Q0 */
            }
            tmp_32 = silk_SUB32( matrix_ptr( A, j, j, M ), tmp_32 );

            if( tmp_32 < diag_min_value ) {
                tmp_32 = silk_SUB32( silk_SMULBB( loop_count + 1, diag_min_value ), tmp_32 );
                /* Matrix not positive semi-definite, or ill conditioned */
                for( i = 0; i < M; i++ ) {
                    matrix_ptr( A, i, i, M ) = silk_ADD32( matrix_ptr( A, i, i, M ), tmp_32 );
                }
                status = 1;
                break;
            }
            D_Q0[ j ] = tmp_32; /* always < max(Correlation) */

            /* two-step division */
            one_div_diag_Q36 = silk_INVERSE32_varQ( tmp_32, 36 );                                 /* Q36 */
            one_div_diag_Q40 = silk_LSHIFT( one_div_diag_Q36, 4 );                                /* Q40 */
            err = silk_SUB32( (opus_int32)1 << 24, silk_SMULWW( tmp_32, one_div_diag_Q40 ) );     /* Q24 */
            one_div_diag_Q48 = silk_SMULWW( err, one_div_diag_Q40 );                              /* Q48 */

            /* Save 1/Ds */
            inv_D[ j ].Q36_part = one_div_diag_Q36;
            inv_D[ j ].Q48_part = one_div_diag_Q48;

            matrix_ptr( L_Q16, j, j, M ) = 65536; /* 1.0 in Q16 */
            ptr1 = matrix_adr( A, j, 0, M );
            ptr2 = matrix_adr( L_Q16, j + 1, 0, M );
            for( i = j + 1; i < M; i++ ) {
                tmp_32 = 0;
                for( k = 0; k < j; k++ ) {
                    tmp_32 = silk_SMLAWW( tmp_32, v_Q0[ k ], ptr2[ k ] ); /* Q0 */
                }
                tmp_32 = silk_SUB32( ptr1[ i ], tmp_32 ); /* always < max(Correlation) */

                /* tmp_32 / D_Q0[j] : Divide to Q16 */
                matrix_ptr( L_Q16, i, j, M ) = silk_ADD32( silk_SMMUL( tmp_32, one_div_diag_Q48 ),
                    silk_RSHIFT( silk_SMULWW( tmp_32, one_div_diag_Q36 ), 4 ) );

                /* go to next column */
                ptr2 += M;
            }
        }
    }
    silk_assert( status == 0 );
}
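/* Editorial sketch, not part of the original source: silk_LDL_factorize_FIX() above is a
   fixed-point LDL^T factorization with two extras: when a pivot falls below diag_min_value
   the diagonal of A is inflated and the factorization is restarted (the outer loop_count
   loop), and each pivot reciprocal is refined in two steps ( one_div_diag_Q36 plus the
   error-corrected one_div_diag_Q48 part ) for extra precision. The underlying algorithm in
   plain floating point, assuming a positive-definite input (the _sketch name is ours): */
static inline void ldl_factorize_sketch(
    const double    *A,         /* I  Symmetric M x M matrix, row-major       */
    int             M,          /* I  Matrix size                             */
    double          *L,         /* O  Unit lower-triangular factor, row-major */
    double          *D          /* O  Diagonal of D                           */
) {
    int i, j, k;

    for( j = 0; j < M; j++ ) {
        /* Pivot: D[ j ] = A[ j ][ j ] - sum_k L[ j ][ k ]^2 * D[ k ] */
        double acc = A[ j * M + j ];
        for( k = 0; k < j; k++ ) {
            acc -= L[ j * M + k ] * L[ j * M + k ] * D[ k ];
        }
        D[ j ] = acc;
        L[ j * M + j ] = 1.0;

        /* Column j of L below the diagonal; entries above the diagonal are left untouched */
        for( i = j + 1; i < M; i++ ) {
            double s = A[ i * M + j ];
            for( k = 0; k < j; k++ ) {
                s -= L[ i * M + k ] * L[ j * M + k ] * D[ k ];
            }
            L[ i * M + j ] = s / acc;
        }
    }
}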
static inline void silk_PLC_conceal(
    silk_decoder_state              *psDec,             /* I/O Decoder state        */
    silk_decoder_control            *psDecCtrl,         /* I/O Decoder control      */
    opus_int16                      frame[]             /* O LPC residual signal    */
)
{
    opus_int   i, j, k;
    opus_int   lag, idx, sLTP_buf_idx, shift1, shift2;
    opus_int32 rand_seed, harm_Gain_Q15, rand_Gain_Q15, inv_gain_Q16, inv_gain_Q30;
    opus_int32 energy1, energy2, *rand_ptr, *pred_lag_ptr;
    opus_int32 LPC_exc_Q14, LPC_pred_Q10, LTP_pred_Q12;
    opus_int16 rand_scale_Q14;
    opus_int16 *B_Q14, *exc_buf_ptr;
    opus_int32 *sLPC_Q14_ptr;
    opus_int16 exc_buf[ 2 * MAX_SUB_FRAME_LENGTH ];
    opus_int16 A_Q12[ MAX_LPC_ORDER ];
    opus_int16 sLTP[ MAX_FRAME_LENGTH ];
    opus_int32 sLTP_Q14[ 2 * MAX_FRAME_LENGTH ];
    silk_PLC_struct *psPLC = &psDec->sPLC;

    if( psDec->first_frame_after_reset ) {
        silk_memset( psPLC->prevLPC_Q12, 0, MAX_LPC_ORDER * sizeof( psPLC->prevLPC_Q12[ 0 ] ) );
    }

    /* Find random noise component */
    /* Scale previous excitation signal */
    exc_buf_ptr = exc_buf;
    for( k = 0; k < 2; k++ ) {
        for( i = 0; i < psPLC->subfr_length; i++ ) {
            exc_buf_ptr[ i ] = (opus_int16)silk_RSHIFT(
                silk_SMULWW( psDec->exc_Q10[ i + ( k + psPLC->nb_subfr - 2 ) * psPLC->subfr_length ], psPLC->prevGain_Q16[ k ] ), 10 );
        }
        exc_buf_ptr += psPLC->subfr_length;
    }
    /* Find the subframe with lowest energy of the last two and use that as random noise generator */
    silk_sum_sqr_shift( &energy1, &shift1, exc_buf,                         psPLC->subfr_length );
    silk_sum_sqr_shift( &energy2, &shift2, &exc_buf[ psPLC->subfr_length ], psPLC->subfr_length );

    if( silk_RSHIFT( energy1, shift2 ) < silk_RSHIFT( energy2, shift1 ) ) {
        /* First sub-frame has lowest energy */
        rand_ptr = &psDec->exc_Q10[ silk_max_int( 0, ( psPLC->nb_subfr - 1 ) * psPLC->subfr_length - RAND_BUF_SIZE ) ];
    } else {
        /* Second sub-frame has lowest energy */
        rand_ptr = &psDec->exc_Q10[ silk_max_int( 0, psPLC->nb_subfr * psPLC->subfr_length - RAND_BUF_SIZE ) ];
    }

    /* Set up Gain to random noise component */
    B_Q14          = psPLC->LTPCoef_Q14;
    rand_scale_Q14 = psPLC->randScale_Q14;

    /* Set up attenuation gains */
    harm_Gain_Q15 = HARM_ATT_Q15[ silk_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    if( psDec->prevSignalType == TYPE_VOICED ) {
        rand_Gain_Q15 = PLC_RAND_ATTENUATE_V_Q15[ silk_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    } else {
        rand_Gain_Q15 = PLC_RAND_ATTENUATE_UV_Q15[ silk_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    }

    /* LPC concealment. Apply BWE to previous LPC */
    silk_bwexpander( psPLC->prevLPC_Q12, psDec->LPC_order, SILK_FIX_CONST( BWE_COEF, 16 ) );

    /* Preload LPC coefficients to array on stack. Gives small performance gain */
    silk_memcpy( A_Q12, psPLC->prevLPC_Q12, psDec->LPC_order * sizeof( opus_int16 ) );

    /* First lost frame */
    if( psDec->lossCnt == 0 ) {
        rand_scale_Q14 = 1 << 14;

        /* Reduce random noise Gain for voiced frames */
        if( psDec->prevSignalType == TYPE_VOICED ) {
            for( i = 0; i < LTP_ORDER; i++ ) {
                rand_scale_Q14 -= B_Q14[ i ];
            }
            rand_scale_Q14 = (opus_int16)silk_RSHIFT( silk_SMULBB( silk_max_16( 3277, rand_scale_Q14 ), psPLC->prevLTP_scale_Q14 ), 14 ); /* floor at 0.2 */
        } else {
            /* Reduce random noise for unvoiced frames with high LPC gain */
            opus_int32 invGain_Q30, down_scale_Q30;

            silk_LPC_inverse_pred_gain( &invGain_Q30, psPLC->prevLPC_Q12, psDec->LPC_order );

            down_scale_Q30 = silk_min_32( silk_RSHIFT( 1 << 30, LOG2_INV_LPC_GAIN_HIGH_THRES ), invGain_Q30 );
            down_scale_Q30 = silk_max_32( silk_RSHIFT( 1 << 30, LOG2_INV_LPC_GAIN_LOW_THRES ), down_scale_Q30 );
            down_scale_Q30 = silk_LSHIFT( down_scale_Q30, LOG2_INV_LPC_GAIN_HIGH_THRES );

            rand_Gain_Q15 = silk_RSHIFT( silk_SMULWB( down_scale_Q30, rand_Gain_Q15 ), 14 );
        }
    }

    rand_seed    = psPLC->rand_seed;
    lag          = silk_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
    sLTP_buf_idx = psDec->ltp_mem_length;

    /* Rewhiten LTP state */
    idx = psDec->ltp_mem_length - lag - psDec->LPC_order - LTP_ORDER / 2;
    silk_assert( idx > 0 );
    silk_LPC_analysis_filter( &sLTP[ idx ], &psDec->outBuf[ idx ], A_Q12, psDec->ltp_mem_length - idx, psDec->LPC_order );
    /* Scale LTP state */
    inv_gain_Q16 = silk_INVERSE32_varQ( psPLC->prevGain_Q16[ 1 ], 32 );
    inv_gain_Q16 = silk_min( inv_gain_Q16, silk_int16_MAX );
    inv_gain_Q30 = silk_LSHIFT( inv_gain_Q16, 14 );
    for( i = idx + psDec->LPC_order; i < psDec->ltp_mem_length; i++ ) {
        sLTP_Q14[ i ] = silk_SMULWB( inv_gain_Q30, sLTP[ i ] );
    }

    /***************************/
    /* LTP synthesis filtering */
    /***************************/
    for( k = 0; k < psDec->nb_subfr; k++ ) {
        /* Set up pointer */
        pred_lag_ptr = &sLTP_Q14[ sLTP_buf_idx - lag + LTP_ORDER / 2 ];
        for( i = 0; i < psDec->subfr_length; i++ ) {
            /* Unrolled loop */
            LTP_pred_Q12 = silk_SMULWB(               pred_lag_ptr[  0 ], B_Q14[ 0 ] );
            LTP_pred_Q12 = silk_SMLAWB( LTP_pred_Q12, pred_lag_ptr[ -1 ], B_Q14[ 1 ] );
            LTP_pred_Q12 = silk_SMLAWB( LTP_pred_Q12, pred_lag_ptr[ -2 ], B_Q14[ 2 ] );
            LTP_pred_Q12 = silk_SMLAWB( LTP_pred_Q12, pred_lag_ptr[ -3 ], B_Q14[ 3 ] );
            LTP_pred_Q12 = silk_SMLAWB( LTP_pred_Q12, pred_lag_ptr[ -4 ], B_Q14[ 4 ] );
            pred_lag_ptr++;

            /* Generate LPC excitation */
            rand_seed = silk_RAND( rand_seed );
            idx = silk_RSHIFT( rand_seed, 25 ) & RAND_BUF_MASK;
            LPC_exc_Q14 = silk_LSHIFT32( silk_SMULWB( rand_ptr[ idx ], rand_scale_Q14 ), 6 ); /* Random noise part */
            LPC_exc_Q14 = silk_ADD32( LPC_exc_Q14, silk_LSHIFT32( LTP_pred_Q12, 2 ) );        /* Harmonic part     */
            sLTP_Q14[ sLTP_buf_idx ] = LPC_exc_Q14;
            sLTP_buf_idx++;
        }

        /* Gradually reduce LTP gain */
        for( j = 0; j < LTP_ORDER; j++ ) {
            B_Q14[ j ] = silk_RSHIFT( silk_SMULBB( harm_Gain_Q15, B_Q14[ j ] ), 15 );
        }
        /* Gradually reduce excitation gain */
        rand_scale_Q14 = silk_RSHIFT( silk_SMULBB( rand_scale_Q14, rand_Gain_Q15 ), 15 );

        /* Slowly increase pitch lag */
        psPLC->pitchL_Q8 = silk_SMLAWB( psPLC->pitchL_Q8, psPLC->pitchL_Q8, PITCH_DRIFT_FAC_Q16 );
        psPLC->pitchL_Q8 = silk_min_32( psPLC->pitchL_Q8, silk_LSHIFT( silk_SMULBB( MAX_PITCH_LAG_MS, psDec->fs_kHz ), 8 ) );
        lag = silk_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
    }

    /***************************/
    /* LPC synthesis filtering */
    /***************************/
    sLPC_Q14_ptr = &sLTP_Q14[ psDec->ltp_mem_length - MAX_LPC_ORDER ];

    /* Copy LPC state */
    silk_memcpy( sLPC_Q14_ptr, psDec->sLPC_Q14_buf, MAX_LPC_ORDER * sizeof( opus_int32 ) );

    silk_assert( psDec->LPC_order >= 10 ); /* check that unrolling works */
    for( i = 0; i < psDec->frame_length; i++ ) {
        /* partly unrolled */
        LPC_pred_Q10 = silk_SMULWB(               sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  1 ], A_Q12[ 0 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  2 ], A_Q12[ 1 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  3 ], A_Q12[ 2 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  4 ], A_Q12[ 3 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  5 ], A_Q12[ 4 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  6 ], A_Q12[ 5 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  7 ], A_Q12[ 6 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  8 ], A_Q12[ 7 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i -  9 ], A_Q12[ 8 ] );
        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i - 10 ], A_Q12[ 9 ] );
        for( j = 10; j < psDec->LPC_order; j++ ) {
            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14_ptr[ MAX_LPC_ORDER + i - j - 1 ], A_Q12[ j ] );
        }

        /* Add prediction to LPC excitation */
        sLPC_Q14_ptr[ MAX_LPC_ORDER + i ] = silk_ADD_LSHIFT32( sLPC_Q14_ptr[ MAX_LPC_ORDER + i ], LPC_pred_Q10, 4 );

        /* Scale with Gain */
        frame[ i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( sLPC_Q14_ptr[ MAX_LPC_ORDER + i ], psPLC->prevGain_Q16[ 1 ] ), 14 ) );
    }

    /* Save LPC state */
    silk_memcpy( psDec->sLPC_Q14_buf, &sLPC_Q14_ptr[ psDec->frame_length ], MAX_LPC_ORDER * sizeof( opus_int32 ) );

    /**************************************/
    /* Update states                      */
    /**************************************/
    psPLC->rand_seed     = rand_seed;
    psPLC->randScale_Q14 = rand_scale_Q14;
    for( i = 0; i < MAX_NB_SUBFR; i++ ) {
        psDecCtrl->pitchL[ i ] = lag;
    }
}
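/* Editorial sketch, not part of the original source: in both PLC variants above, the two
   sub-frame energies come back from the energy helper as a ( mantissa, shift ) pair, with the
   true energy being roughly energy << shift. Comparing energy1 << shift1 against
   energy2 << shift2 directly could overflow 32 bits, so the code divides both sides by
   2^( shift1 + shift2 ) instead, giving the cross-shifted test
   silk_RSHIFT( energy1, shift2 ) < silk_RSHIFT( energy2, shift1 ) (equivalent up to
   truncation). A 64-bit rendering of the same comparison, assuming that ( mantissa, shift )
   convention (the _sketch name is ours): */
static inline int first_subframe_has_lower_energy_sketch(
    opus_int32 energy1, int shift1,
    opus_int32 energy2, int shift2
) {
    /* Overflow-free form of ( energy1 << shift1 ) < ( energy2 << shift2 ) */
    return ( (opus_int64)energy1 << shift1 ) < ( (opus_int64)energy2 << shift2 );
}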
/* test if LPC coefficients are stable (all poles within unit circle) */
static opus_int32 LPC_inverse_pred_gain_QA_c(           /* O   Returns inverse prediction gain in energy domain, Q30    */
    opus_int32          A_QA[ SILK_MAX_ORDER_LPC ],     /* I   Prediction coefficients                                  */
    const opus_int      order                           /* I   Prediction order                                         */
)
{
    opus_int   k, n, mult2Q;
    opus_int32 invGain_Q30, rc_Q31, rc_mult1_Q30, rc_mult2, tmp1, tmp2;

    invGain_Q30 = SILK_FIX_CONST( 1, 30 );
    for( k = order - 1; k > 0; k-- ) {
        /* Check for stability */
        if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) {
            return 0;
        }

        /* Set RC equal to negated AR coef */
        rc_Q31 = -silk_LSHIFT( A_QA[ k ], 31 - QA );

        /* rc_mult1_Q30 range: [ 1 : 2^30 ] */
        rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) );
        silk_assert( rc_mult1_Q30 > ( 1 << 15 ) );      /* reduce A_LIMIT if fails */
        silk_assert( rc_mult1_Q30 <= ( 1 << 30 ) );

        /* Update inverse gain */
        /* invGain_Q30 range: [ 0 : 2^30 ] */
        invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 );
        silk_assert( invGain_Q30 >= 0 );
        silk_assert( invGain_Q30 <= ( 1 << 30 ) );
        if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) {
            return 0;
        }

        /* rc_mult2 range: [ 2^30 : silk_int32_MAX ] */
        mult2Q = 32 - silk_CLZ32( silk_abs( rc_mult1_Q30 ) );
        rc_mult2 = silk_INVERSE32_varQ( rc_mult1_Q30, mult2Q + 30 );

        /* Update AR coefficient */
        for( n = 0; n < ( k + 1 ) >> 1; n++ ) {
            opus_int64 tmp64;
            tmp1 = A_QA[ n ];
            tmp2 = A_QA[ k - n - 1 ];
            tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32( tmp1,
                    MUL32_FRAC_Q( tmp2, rc_Q31, 31 ) ), rc_mult2 ), mult2Q );
            if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) {
                return 0;
            }
            A_QA[ n ] = (opus_int32)tmp64;
            tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32( tmp2,
                    MUL32_FRAC_Q( tmp1, rc_Q31, 31 ) ), rc_mult2 ), mult2Q );
            if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) {
                return 0;
            }
            A_QA[ k - n - 1 ] = (opus_int32)tmp64;
        }
    }

    /* Check for stability */
    if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) {
        return 0;
    }

    /* Set RC equal to negated AR coef */
    rc_Q31 = -silk_LSHIFT( A_QA[ 0 ], 31 - QA );

    /* Range: [ 1 : 2^30 ] */
    rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) );

    /* Update inverse gain */
    /* Range: [ 0 : 2^30 ] */
    invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 );
    silk_assert( invGain_Q30 >= 0 );
    silk_assert( invGain_Q30 <= ( 1 << 30 ) );
    if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) {
        return 0;
    }

    return invGain_Q30;
}
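/* Editorial sketch, not part of the original source: the fixed-point routines above and
   below implement the reverse Levinson ("step-down") recursion. At each stage the negated
   highest-order coefficient is taken as the reflection coefficient rc, the inverse prediction
   gain is multiplied by ( 1 - rc^2 ), and the remaining coefficients are deflated by one
   order. A plain floating-point rendering, returning 0 for an unstable (or borderline)
   filter; it reuses SILK_MAX_ORDER_LPC from the surrounding code, omits the
   MAX_PREDICTION_POWER_GAIN early-out, and the _sketch name is ours: */
static inline double lpc_inverse_pred_gain_sketch(
    const double    *A,         /* I  Prediction coefficients   */
    int             order       /* I  Prediction order          */
) {
    int    k, n;
    double invGain = 1.0, rc, rc_mult, tmp1, tmp2;
    double Atmp[ SILK_MAX_ORDER_LPC ];

    for( k = 0; k < order; k++ ) {
        Atmp[ k ] = A[ k ];
    }
    for( k = order - 1; k > 0; k-- ) {
        rc      = -Atmp[ k ];               /* negated AR coefficient, as in the code above */
        rc_mult = 1.0 - rc * rc;
        if( rc_mult <= 0.0 ) {
            return 0.0;                     /* |rc| >= 1: pole on or outside the unit circle */
        }
        invGain *= rc_mult;
        /* Deflate to order k: a'_n = ( a_n - rc * a_{k-1-n} ) / ( 1 - rc^2 ) */
        for( n = 0; n < ( k + 1 ) >> 1; n++ ) {
            tmp1 = Atmp[ n ];
            tmp2 = Atmp[ k - n - 1 ];
            Atmp[ n ]         = ( tmp1 - tmp2 * rc ) / rc_mult;
            Atmp[ k - n - 1 ] = ( tmp2 - tmp1 * rc ) / rc_mult;
        }
    }
    rc      = -Atmp[ 0 ];
    rc_mult = 1.0 - rc * rc;
    if( rc_mult <= 0.0 ) {
        return 0.0;
    }
    return invGain * rc_mult;
}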
/* test if LPC coefficients are stable (all poles within unit circle) */
static opus_int32 LPC_inverse_pred_gain_QA(                 /* O   Returns inverse prediction gain in energy domain, Q30    */
    opus_int32          A_QA[ 2 ][ SILK_MAX_ORDER_LPC ],    /* I   Prediction coefficients                                  */
    const opus_int      order                               /* I   Prediction order                                         */
)
{
    opus_int   k, n, mult2Q;
    opus_int32 invGain_Q30, rc_Q31, rc_mult1_Q30, rc_mult2, tmp_QA;
    opus_int32 *Aold_QA, *Anew_QA;

    Anew_QA = A_QA[ order & 1 ];

    invGain_Q30 = (opus_int32)1 << 30;
    for( k = order - 1; k > 0; k-- ) {
        /* Check for stability */
        if( ( Anew_QA[ k ] > A_LIMIT ) || ( Anew_QA[ k ] < -A_LIMIT ) ) {
            return 0;
        }

        /* Set RC equal to negated AR coef */
        rc_Q31 = -silk_LSHIFT( Anew_QA[ k ], 31 - QA );

        /* rc_mult1_Q30 range: [ 1 : 2^30 ] */
        rc_mult1_Q30 = ( (opus_int32)1 << 30 ) - silk_SMMUL( rc_Q31, rc_Q31 );
        silk_assert( rc_mult1_Q30 > ( 1 << 15 ) );      /* reduce A_LIMIT if fails */
        silk_assert( rc_mult1_Q30 <= ( 1 << 30 ) );

        /* rc_mult2 range: [ 2^30 : silk_int32_MAX ] */
        mult2Q = 32 - silk_CLZ32( silk_abs( rc_mult1_Q30 ) );
        rc_mult2 = silk_INVERSE32_varQ( rc_mult1_Q30, mult2Q + 30 );

        /* Update inverse gain */
        /* invGain_Q30 range: [ 0 : 2^30 ] */
        invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 );
        silk_assert( invGain_Q30 >= 0 );
        silk_assert( invGain_Q30 <= ( 1 << 30 ) );

        /* Swap pointers */
        Aold_QA = Anew_QA;
        Anew_QA = A_QA[ k & 1 ];

        /* Update AR coefficient */
        for( n = 0; n < k; n++ ) {
            tmp_QA = Aold_QA[ n ] - MUL32_FRAC_Q( Aold_QA[ k - n - 1 ], rc_Q31, 31 );
            Anew_QA[ n ] = MUL32_FRAC_Q( tmp_QA, rc_mult2, mult2Q );
        }
    }

    /* Check for stability */
    if( ( Anew_QA[ 0 ] > A_LIMIT ) || ( Anew_QA[ 0 ] < -A_LIMIT ) ) {
        return 0;
    }

    /* Set RC equal to negated AR coef */
    rc_Q31 = -silk_LSHIFT( Anew_QA[ 0 ], 31 - QA );

    /* Range: [ 1 : 2^30 ] */
    rc_mult1_Q30 = ( (opus_int32)1 << 30 ) - silk_SMMUL( rc_Q31, rc_Q31 );

    /* Update inverse gain */
    /* Range: [ 0 : 2^30 ] */
    invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 );
    silk_assert( invGain_Q30 >= 0 );
    silk_assert( invGain_Q30 <= ( 1 << 30 ) );

    return invGain_Q30;
}