/* High-pass filter with cutoff frequency adaptation based on pitch lag statistics.
 * Tracks a smoothed log-domain estimate of the low end of the pitch frequency range
 * (in variable_HP_smth1_Q15) which the encoder uses to set the HP filter cutoff.
 * Only updates when the previous frame was voiced. */
void silk_HP_variable_cutoff(silk_encoder_state_Fxx state_Fxx[] /* I/O Encoder states */ )
{
    int quality_Q15;
    int32_t pitch_freq_Hz_Q16, pitch_freq_log_Q7, delta_freq_Q7;
    silk_encoder_state *psEncC1 = &state_Fxx[0].sCmn;

    /* Adaptive cutoff frequency: estimate low end of pitch frequency range */
    if (psEncC1->prevSignalType == TYPE_VOICED) {
        /* Pitch frequency in Hz (Q16) from sample rate and previous lag, then
           converted to the log domain; (16 << 7) removes the Q16 scaling offset */
        pitch_freq_Hz_Q16 =
            silk_DIV32_16(silk_LSHIFT
                          (silk_MUL(psEncC1->fs_kHz, 1000), 16),
                          psEncC1->prevLag);
        pitch_freq_log_Q7 = silk_lin2log(pitch_freq_Hz_Q16) - (16 << 7);

        /* adjustment based on quality: pull the estimate toward the minimum
           cutoff when input quality is low */
        quality_Q15 = psEncC1->input_quality_bands_Q15[0];
        pitch_freq_log_Q7 =
            silk_SMLAWB(pitch_freq_log_Q7,
                        silk_SMULWB(silk_LSHIFT(-quality_Q15, 2), quality_Q15),
                        pitch_freq_log_Q7 -
                        (silk_lin2log
                         (SILK_FIX_CONST(VARIABLE_HP_MIN_CUTOFF_HZ, 16)) -
                         (16 << 7)));

        /* delta_freq = pitch_freq_log - psEnc->variable_HP_smth1; */
        delta_freq_Q7 =
            pitch_freq_log_Q7 -
            silk_RSHIFT(psEncC1->variable_HP_smth1_Q15, 8);
        if (delta_freq_Q7 < 0) {
            /* less smoothing for decreasing pitch frequency, to track something close to the minimum */
            delta_freq_Q7 = silk_MUL(delta_freq_Q7, 3);
        }

        /* limit delta, to reduce impact of outliers in pitch estimation */
        delta_freq_Q7 =
            silk_LIMIT_32(delta_freq_Q7,
                          -SILK_FIX_CONST(VARIABLE_HP_MAX_DELTA_FREQ, 7),
                          SILK_FIX_CONST(VARIABLE_HP_MAX_DELTA_FREQ, 7));

        /* update smoother; step size is scaled by speech activity so the
           estimate moves faster during active speech */
        psEncC1->variable_HP_smth1_Q15 =
            silk_SMLAWB(psEncC1->variable_HP_smth1_Q15,
                        silk_SMULBB(psEncC1->speech_activity_Q8,
                                    delta_freq_Q7),
                        SILK_FIX_CONST(VARIABLE_HP_SMTH_COEF1, 16));

        /* limit frequency range to [VARIABLE_HP_MIN_CUTOFF_HZ, VARIABLE_HP_MAX_CUTOFF_HZ] in log Q15 */
        psEncC1->variable_HP_smth1_Q15 =
            silk_LIMIT_32(psEncC1->variable_HP_smth1_Q15,
                          silk_LSHIFT(silk_lin2log
                                      (VARIABLE_HP_MIN_CUTOFF_HZ), 8),
                          silk_LSHIFT(silk_lin2log
                                      (VARIABLE_HP_MAX_CUTOFF_HZ), 8));
    }
}
/* Build the even (P) and odd (Q) polynomial pair, in Q16, used by the
 * A-to-NLSF root search, from the LPC coefficient vector a_Q16. */
static OPUS_INLINE void silk_A2NLSF_init(
    const opus_int32 *a_Q16,
    opus_int32       *P,
    opus_int32       *Q,
    const opus_int   dd
)
{
    opus_int i;

    /* Leading coefficients are unity in Q16 */
    P[ dd ] = silk_LSHIFT( 1, 16 );
    Q[ dd ] = silk_LSHIFT( 1, 16 );

    /* Symmetric / antisymmetric combinations of the filter coefficients */
    for( i = 0; i < dd; i++ ) {
        P[ i ] = -a_Q16[ dd - i - 1 ] - a_Q16[ dd + i ];    /* Q16 */
        Q[ i ] = -a_Q16[ dd - i - 1 ] + a_Q16[ dd + i ];    /* Q16 */
    }

    /* For even filter orders, z = 1 is always a root of Q and z = -1 is
     * always a root of P; divide those known roots out here. */
    for( i = dd; i > 0; i-- ) {
        P[ i - 1 ] -= P[ i ];
        Q[ i - 1 ] += Q[ i ];
    }

    /* Change of basis: from cos(n*f) to powers of cos(f) */
    silk_A2NLSF_trans_poly( P, dd );
    silk_A2NLSF_trans_poly( Q, dd );
}
/* Schur recursion: compute reflection coefficients from autocorrelations.
 * uses SMLAWB(), requiring armv5E and higher. */
opus_int32 silk_schur(                      /* O    Returns residual energy                     */
    opus_int16       *rc_Q15,               /* O    reflection coefficients [order] Q15         */
    const opus_int32 *c,                    /* I    correlations [order+1]                      */
    const opus_int32 order                  /* I    prediction order                            */
)
{
    opus_int   k, n, lz;
    opus_int32 C[ SILK_MAX_ORDER_LPC + 1 ][ 2 ];
    opus_int32 Ctmp1, Ctmp2, rc_tmp_Q15;

    /* Only even orders 6..16 are supported */
    silk_assert( order==6||order==8||order==10||order==12||order==14||order==16 );

    /* Get number of leading zeros */
    lz = silk_CLZ32( c[ 0 ] );

    /* Copy correlations and adjust level to Q30 (normalize so c[0] has two leading zeros) */
    if( lz < 2 ) {
        /* lz must be 1, so shift one to the right */
        for( k = 0; k < order + 1; k++ ) {
            C[ k ][ 0 ] = C[ k ][ 1 ] = silk_RSHIFT( c[ k ], 1 );
        }
    } else if( lz > 2 ) {
        /* Shift to the left */
        lz -= 2;
        for( k = 0; k < order + 1; k++ ) {
            C[ k ][ 0 ] = C[ k ][ 1 ] = silk_LSHIFT( c[ k ], lz );
        }
    } else {
        /* No need to shift */
        for( k = 0; k < order + 1; k++ ) {
            C[ k ][ 0 ] = C[ k ][ 1 ] = c[ k ];
        }
    }

    for( k = 0; k < order; k++ ) {
        /* Get reflection coefficient: -C[k+1][0] / C[0][1], result in Q15.
           The max() guards against division by zero. */
        rc_tmp_Q15 = -silk_DIV32_16( C[ k + 1 ][ 0 ], silk_max_32( silk_RSHIFT( C[ 0 ][ 1 ], 15 ), 1 ) );

        /* Clip (shouldn't happen for properly conditioned inputs) */
        rc_tmp_Q15 = silk_SAT16( rc_tmp_Q15 );

        /* Store */
        rc_Q15[ k ] = (opus_int16)rc_tmp_Q15;

        /* Update correlations: forward and backward recursions share the
           pre-update values, hence the temporaries */
        for( n = 0; n < order - k; n++ ) {
            Ctmp1 = C[ n + k + 1 ][ 0 ];
            Ctmp2 = C[ n ][ 1 ];
            C[ n + k + 1 ][ 0 ] = silk_SMLAWB( Ctmp1, silk_LSHIFT( Ctmp2, 1 ), rc_tmp_Q15 );
            C[ n ][ 1 ]         = silk_SMLAWB( Ctmp2, silk_LSHIFT( Ctmp1, 1 ), rc_tmp_Q15 );
        }
    }

    /* return residual energy */
    return C[ 0 ][ 1 ];
}
/* Schur recursion with higher-precision (Q16) reflection coefficients.
 * Uses SMULL(), available on armv4 */
opus_int32 silk_schur64(                    /* O    returns residual energy                     */
    opus_int32       rc_Q16[],              /* O    Reflection coefficients [order] Q16         */
    const opus_int32 c[],                   /* I    Correlations [order+1]                      */
    opus_int32       order                  /* I    Prediction order                            */
)
{
    opus_int   k, n;
    opus_int32 C[ SILK_MAX_ORDER_LPC + 1 ][ 2 ];
    opus_int32 Ctmp1_Q30, Ctmp2_Q30, rc_tmp_Q31;

    /* Only even orders 6..16 are supported */
    silk_assert( order==6||order==8||order==10||order==12||order==14||order==16 );

    /* Check for invalid input: non-positive zero-lag correlation */
    if( c[ 0 ] <= 0 ) {
        silk_memset( rc_Q16, 0, order * sizeof( opus_int32 ) );
        return 0;
    }

    for( k = 0; k < order + 1; k++ ) {
        C[ k ][ 0 ] = C[ k ][ 1 ] = c[ k ];
    }

    for( k = 0; k < order; k++ ) {
        /* Check that we won't be getting an unstable rc, otherwise stop here. */
        if (silk_abs_int32(C[ k + 1 ][ 0 ]) >= C[ 0 ][ 1 ]) {
            /* Substitute a +/-0.99 coefficient and zero the rest (below) */
            if ( C[ k + 1 ][ 0 ] > 0 ) {
                rc_Q16[ k ] = -SILK_FIX_CONST( .99f, 16 );
            } else {
                rc_Q16[ k ] = SILK_FIX_CONST( .99f, 16 );
            }
            k++;
            break;
        }

        /* Get reflection coefficient: divide two Q30 values and get result in Q31 */
        rc_tmp_Q31 = silk_DIV32_varQ( -C[ k + 1 ][ 0 ], C[ 0 ][ 1 ], 31 );

        /* Save the output (rounded down from Q31 to Q16) */
        rc_Q16[ k ] = silk_RSHIFT_ROUND( rc_tmp_Q31, 15 );

        /* Update correlations */
        for( n = 0; n < order - k; n++ ) {
            Ctmp1_Q30 = C[ n + k + 1 ][ 0 ];
            Ctmp2_Q30 = C[ n ][ 1 ];

            /* Multiply and add the highest int32 */
            C[ n + k + 1 ][ 0 ] = Ctmp1_Q30 + silk_SMMUL( silk_LSHIFT( Ctmp2_Q30, 1 ), rc_tmp_Q31 );
            C[ n ][ 1 ]         = Ctmp2_Q30 + silk_SMMUL( silk_LSHIFT( Ctmp1_Q30, 1 ), rc_tmp_Q31 );
        }
    }

    /* Zero remaining coefficients if the recursion was stopped early */
    for(; k < order; k++ ) {
        rc_Q16[ k ] = 0;
    }

    /* Residual energy; clamped to at least 1 */
    return silk_max_32( 1, C[ 0 ][ 1 ] );
}
/* Glues concealed frames with new good received frames: after a loss, the
 * first good frame is faded in from the concealment energy level to avoid
 * an energy discontinuity. */
void silk_PLC_glue_frames(
    silk_decoder_state *psDec,    /* I/O decoder state        */
    opus_int16         frame[],   /* I/O signal               */
    opus_int           length     /* I   length of signal     */
)
{
    opus_int        i, energy_shift;
    opus_int32      energy;
    silk_PLC_struct *psPLC;

    psPLC = &psDec->sPLC;

    if( psDec->lossCnt ) {
        /* Calculate energy in concealed residual */
        silk_sum_sqr_shift( &psPLC->conc_energy, &psPLC->conc_energy_shift, frame, length );

        psPLC->last_frame_lost = 1;
    } else {
        if( psDec->sPLC.last_frame_lost ) {
            /* Calculate residual in decoded signal if last frame was lost */
            silk_sum_sqr_shift( &energy, &energy_shift, frame, length );

            /* Normalize energies to a common shift before comparing */
            if( energy_shift > psPLC->conc_energy_shift ) {
                psPLC->conc_energy = silk_RSHIFT( psPLC->conc_energy, energy_shift - psPLC->conc_energy_shift );
            } else if( energy_shift < psPLC->conc_energy_shift ) {
                energy = silk_RSHIFT( energy, psPLC->conc_energy_shift - energy_shift );
            }

            /* Fade in the energy difference (only when the good frame is louder) */
            if( energy > psPLC->conc_energy ) {
                opus_int32 frac_Q24, LZ;
                opus_int32 gain_Q16, slope_Q16;

                LZ = silk_CLZ32( psPLC->conc_energy );
                LZ = LZ - 1;
                psPLC->conc_energy = silk_LSHIFT( psPLC->conc_energy, LZ );
                energy = silk_RSHIFT( energy, silk_max_32( 24 - LZ, 0 ) );

                /* Ratio of concealment to good-frame energy, Q24 */
                frac_Q24 = silk_DIV32( psPLC->conc_energy, silk_max( energy, 1 ) );

                /* Initial gain = sqrt(ratio), Q16; ramp linearly up to unity */
                gain_Q16 = silk_LSHIFT( silk_SQRT_APPROX( frac_Q24 ), 4 );
                slope_Q16 = silk_DIV32_16( ( (opus_int32)1 << 16 ) - gain_Q16, length );
                /* Make slope 4x steeper to avoid missing onsets after DTX */
                slope_Q16 = silk_LSHIFT( slope_Q16, 2 );

                for( i = 0; i < length; i++ ) {
                    frame[ i ] = silk_SMULWB( gain_Q16, frame[ i ] );
                    gain_Q16 += slope_Q16;
                    if( gain_Q16 > (opus_int32)1 << 16 ) {
                        /* Reached unity gain; remaining samples pass unmodified */
                        break;
                    }
                }
            }
        }
        psPLC->last_frame_lost = 0;
    }
}
/* Prefilter for finding Quantizer input signal.
 * Applies harmonic (long-term), tilt, and low-frequency noise shaping to the
 * short-term residual, producing the prefiltered signal xw_Q3. */
static OPUS_INLINE void silk_prefilt_FIX(
    silk_prefilter_state_FIX *P,                  /* I/O state */
    opus_int32 st_res_Q12[],                      /* I short term residual signal */
    opus_int32 xw_Q3[],                           /* O prefiltered signal */
    opus_int32 HarmShapeFIRPacked_Q12,            /* I Harmonic shaping coeficients */
    opus_int Tilt_Q14,                            /* I Tilt shaping coeficient */
    opus_int32 LF_shp_Q14,                        /* I Low-frequancy shaping coeficients */
    opus_int lag,                                 /* I Lag for harmonic shaping */
    opus_int length                               /* I Length of signals */
)
{
    opus_int i, idx, LTP_shp_buf_idx;
    opus_int32 n_LTP_Q12, n_Tilt_Q10, n_LF_Q10;
    opus_int32 sLF_MA_shp_Q12, sLF_AR_shp_Q12;
    opus_int16 *LTP_shp_buf;

    /* To speed up use temp variables instead of using the struct */
    LTP_shp_buf = P->sLTP_shp;
    LTP_shp_buf_idx = P->sLTP_shp_buf_idx;
    sLF_AR_shp_Q12 = P->sLF_AR_shp_Q12;
    sLF_MA_shp_Q12 = P->sLF_MA_shp_Q12;

    for (i = 0; i < length; i++) {
        if (lag > 0) {
            /* 3-tap harmonic shaping FIR around the pitch lag; unrolled loop */
            silk_assert(HARM_SHAPE_FIR_TAPS == 3);
            idx = lag + LTP_shp_buf_idx;
            n_LTP_Q12 =
                silk_SMULBB(LTP_shp_buf
                            [(idx - HARM_SHAPE_FIR_TAPS / 2 - 1) & LTP_MASK],
                            HarmShapeFIRPacked_Q12);
            n_LTP_Q12 =
                silk_SMLABT(n_LTP_Q12,
                            LTP_shp_buf[(idx - HARM_SHAPE_FIR_TAPS / 2) & LTP_MASK],
                            HarmShapeFIRPacked_Q12);
            n_LTP_Q12 =
                silk_SMLABB(n_LTP_Q12,
                            LTP_shp_buf[(idx - HARM_SHAPE_FIR_TAPS / 2 + 1) & LTP_MASK],
                            HarmShapeFIRPacked_Q12);
        } else {
            n_LTP_Q12 = 0;
        }

        /* Tilt and low-frequency shaping contributions */
        n_Tilt_Q10 = silk_SMULWB(sLF_AR_shp_Q12, Tilt_Q14);
        n_LF_Q10 =
            silk_SMLAWB(silk_SMULWT(sLF_AR_shp_Q12, LF_shp_Q14),
                        sLF_MA_shp_Q12, LF_shp_Q14);

        /* Update AR and MA shaping states */
        sLF_AR_shp_Q12 = silk_SUB32(st_res_Q12[i], silk_LSHIFT(n_Tilt_Q10, 2));
        sLF_MA_shp_Q12 = silk_SUB32(sLF_AR_shp_Q12, silk_LSHIFT(n_LF_Q10, 2));

        /* Push saturated shaped sample into the circular LTP shaping buffer */
        LTP_shp_buf_idx = (LTP_shp_buf_idx - 1) & LTP_MASK;
        LTP_shp_buf[LTP_shp_buf_idx] =
            (opus_int16) silk_SAT16(silk_RSHIFT_ROUND(sLF_MA_shp_Q12, 12));

        /* Output in Q3: Q12 value minus harmonic term, rounded down 9 bits */
        xw_Q3[i] = silk_RSHIFT_ROUND(silk_SUB32(sLF_MA_shp_Q12, n_LTP_Q12), 9);
    }

    /* Copy temp variable back to state */
    P->sLF_AR_shp_Q12 = sLF_AR_shp_Q12;
    P->sLF_MA_shp_Q12 = sLF_MA_shp_Q12;
    P->sLTP_shp_buf_idx = LTP_shp_buf_idx;
}
/* Find least-squares prediction gain for one signal based on another and quantize it */
int32_t silk_stereo_find_predictor(   /* O Returns predictor in Q13                   */
    int32_t *ratio_Q14,               /* O Ratio of residual and mid energies         */
    const int16_t x[],                /* I Basis signal                               */
    const int16_t y[],                /* I Target signal                              */
    int32_t mid_res_amp_Q0[],         /* I/O Smoothed mid, residual norms             */
    int length,                       /* I Number of samples                          */
    int smooth_coef_Q16               /* I Smoothing coefficient                      */
)
{
    int scale, scale1, scale2;
    int32_t nrgx, nrgy, corr, pred_Q13, pred2_Q10;

    /* Find predictor: energies of both signals at a common (even) scale */
    silk_sum_sqr_shift(&nrgx, &scale1, x, length);
    silk_sum_sqr_shift(&nrgy, &scale2, y, length);
    scale = silk_max_int(scale1, scale2);
    scale = scale + (scale & 1);    /* make even */
    nrgy = silk_RSHIFT32(nrgy, scale - scale2);
    nrgx = silk_RSHIFT32(nrgx, scale - scale1);
    nrgx = silk_max_int(nrgx, 1);   /* avoid division by zero */
    corr = silk_inner_prod_aligned_scale(x, y, scale, length);

    /* Predictor = corr / nrgx, in Q13, limited to [-2, 2] */
    pred_Q13 = silk_DIV32_varQ(corr, nrgx, 13);
    pred_Q13 = silk_LIMIT(pred_Q13, -(1 << 14), 1 << 14);
    pred2_Q10 = silk_SMULWB(pred_Q13, pred_Q13);

    /* Faster update for signals with large prediction parameters */
    smooth_coef_Q16 = (int) silk_max_int(smooth_coef_Q16, silk_abs(pred2_Q10));

    /* Smoothed mid and residual norms */
    assert(smooth_coef_Q16 < 32768);
    scale = silk_RSHIFT(scale, 1);
    mid_res_amp_Q0[0] =
        silk_SMLAWB(mid_res_amp_Q0[0],
                    silk_LSHIFT(silk_SQRT_APPROX(nrgx), scale) - mid_res_amp_Q0[0],
                    smooth_coef_Q16);
    /* Residual energy = nrgy - 2 * pred * corr + pred^2 * nrgx */
    nrgy = silk_SUB_LSHIFT32(nrgy, silk_SMULWB(corr, pred_Q13), 3 + 1);
    nrgy = silk_ADD_LSHIFT32(nrgy, silk_SMULWB(nrgx, pred2_Q10), 6);
    mid_res_amp_Q0[1] =
        silk_SMLAWB(mid_res_amp_Q0[1],
                    silk_LSHIFT(silk_SQRT_APPROX(nrgy), scale) - mid_res_amp_Q0[1],
                    smooth_coef_Q16);

    /* Ratio of smoothed residual and mid norms, in Q14, clamped to int16 range */
    *ratio_Q14 = silk_DIV32_varQ(mid_res_amp_Q0[1], silk_max(mid_res_amp_Q0[0], 1), 14);
    *ratio_Q14 = silk_LIMIT(*ratio_Q14, 0, 32767);

    return pred_Q13;
}
/* Control SNR of residual quantizer: map a target bitrate to an SNR setting
 * via a per-bandwidth rate table, with linear interpolation between entries. */
opus_int silk_control_SNR(
    silk_encoder_state *psEncC,        /* I/O Pointer to Silk encoder state    */
    opus_int32         TargetRate_bps  /* I   Target max bitrate (bps)         */
)
{
    opus_int k, ret = SILK_NO_ERROR;
    opus_int32 frac_Q6;
    const opus_int32 *rateTable;

    /* Set bitrate/coding quality */
    TargetRate_bps = silk_LIMIT(TargetRate_bps, MIN_TARGET_RATE_BPS, MAX_TARGET_RATE_BPS);
    if (TargetRate_bps != psEncC->TargetRate_bps) {
        psEncC->TargetRate_bps = TargetRate_bps;

        /* If new TargetRate_bps, translate to SNR_dB value */
        if (psEncC->fs_kHz == 8) {
            rateTable = silk_TargetRate_table_NB;
        } else if (psEncC->fs_kHz == 12) {
            rateTable = silk_TargetRate_table_MB;
        } else {
            rateTable = silk_TargetRate_table_WB;
        }

        /* Reduce bitrate for 10 ms modes in these calculations */
        if (psEncC->nb_subfr == 2) {
            TargetRate_bps -= REDUCE_BITRATE_10_MS_BPS;
        }

        /* Find bitrate interval in table and interpolate */
        for (k = 1; k < TARGET_RATE_TAB_SZ; k++) {
            if (TargetRate_bps <= rateTable[k]) {
                /* frac_Q6: position within [rateTable[k-1], rateTable[k]] */
                frac_Q6 =
                    silk_DIV32(silk_LSHIFT(TargetRate_bps - rateTable[k - 1], 6),
                               rateTable[k] - rateTable[k - 1]);
                psEncC->SNR_dB_Q7 =
                    silk_LSHIFT(silk_SNR_table_Q1[k - 1], 6) +
                    silk_MUL(frac_Q6,
                             silk_SNR_table_Q1[k] - silk_SNR_table_Q1[k - 1]);
                break;
            }
        }

        /* Reduce coding quality whenever LBRR is enabled, to free up some bits */
        if (psEncC->LBRR_enabled) {
            psEncC->SNR_dB_Q7 =
                silk_SMLABB(psEncC->SNR_dB_Q7,
                            12 - psEncC->LBRR_GainIncreases,
                            SILK_FIX_CONST(-0.25, 7));
        }
    }
    return ret;
}
/* Polynomial evaluation by Horner's method; the common order-8 case is
 * fully unrolled. */
static OPUS_INLINE opus_int32 silk_A2NLSF_eval_poly( /* return the polynomial evaluation, in Q16 */
    opus_int32       *p,   /* I    Polynomial, Q16         */
    const opus_int32 x,    /* I    Evaluation point, Q12   */
    const opus_int   dd    /* I    Order                   */
)
{
    opus_int   n;
    opus_int32 x_Q16, y32;

    y32 = p[ dd ];                                  /* Q16 */
    x_Q16 = silk_LSHIFT( x, 4 );                    /* Q12 -> Q16 */

    if ( opus_likely( 8 == dd ) ) {
        /* Unrolled Horner steps for order 8 */
        y32 = (opus_int32) silk_SMLAWW( p[ 7 ], y32, x_Q16 );
        y32 = (opus_int32) silk_SMLAWW( p[ 6 ], y32, x_Q16 );
        y32 = (opus_int32) silk_SMLAWW( p[ 5 ], y32, x_Q16 );
        y32 = (opus_int32) silk_SMLAWW( p[ 4 ], y32, x_Q16 );
        y32 = (opus_int32) silk_SMLAWW( p[ 3 ], y32, x_Q16 );
        y32 = (opus_int32) silk_SMLAWW( p[ 2 ], y32, x_Q16 );
        y32 = (opus_int32) silk_SMLAWW( p[ 1 ], y32, x_Q16 );
        y32 = (opus_int32) silk_SMLAWW( p[ 0 ], y32, x_Q16 );
    } else {
        /* Generic Horner loop for other orders */
        for( n = dd - 1; n >= 0; n-- ) {
            y32 = (opus_int32) silk_SMLAWW( p[ n ], y32, x_Q16 );   /* Q16 */
        }
    }
    return y32;
}
/* Gains scalar dequantization, uniform on log scale */
void silk_gains_dequant(
    opus_int32       gain_Q16[ MAX_NB_SUBFR ],  /* O    quantized gains                 */
    const opus_int8  ind[ MAX_NB_SUBFR ],       /* I    gain indices                    */
    opus_int8        *prev_ind,                 /* I/O  last index in previous frame    */
    const opus_int   conditional,               /* I    first gain is delta coded if 1  */
    const opus_int   nb_subfr                   /* I    number of subframes             */
)
{
    opus_int   k, ind_tmp, double_step_size_threshold;

    for( k = 0; k < nb_subfr; k++ ) {
        if( k == 0 && conditional == 0 ) {
            /* Gain index is not allowed to go down more than 16 steps (~21.8 dB) */
            *prev_ind = silk_max_int( ind[ k ], *prev_ind - 16 );
        } else {
            /* Delta index */
            ind_tmp = ind[ k ] + MIN_DELTA_GAIN_QUANT;

            /* Accumulate deltas; above the threshold the step size is doubled
               (mirrors the encoder in silk_gains_quant) */
            double_step_size_threshold = 2 * MAX_DELTA_GAIN_QUANT - N_LEVELS_QGAIN + *prev_ind;
            if( ind_tmp > double_step_size_threshold ) {
                *prev_ind += silk_LSHIFT( ind_tmp, 1 ) - double_step_size_threshold;
            } else {
                *prev_ind += ind_tmp;
            }
        }
        *prev_ind = silk_LIMIT_int( *prev_ind, 0, N_LEVELS_QGAIN - 1 );

        /* Scale and convert to linear scale */
        gain_Q16[ k ] = silk_log2lin( silk_min_32( silk_SMULWB( INV_SCALE_Q16, *prev_ind ) + OFFSET, 3967 ) ); /* 3967 = 31 in Q7 */
    }
}
/* Decode quantized NLSF vector: first-stage codebook vector plus
 * inverse-weighted predictive residual, followed by stabilization. */
void silk_NLSF_decode(
    opus_int16                 *pNLSF_Q15,   /* O    Quantized NLSF vector [ LPC_ORDER ]     */
    opus_int8                  *NLSFIndices, /* I    Codebook path vector [ LPC_ORDER + 1 ]  */
    const silk_NLSF_CB_struct  *psNLSF_CB    /* I    Codebook object                         */
)
{
    opus_int         i;
    opus_uint8       pred_Q8[ MAX_LPC_ORDER ];
    opus_int16       ec_ix[ MAX_LPC_ORDER ];
    opus_int16       res_Q10[ MAX_LPC_ORDER ];
    opus_int32       NLSF_Q15_tmp;
    const opus_uint8 *pCB_element;
    const opus_int16 *pCB_Wght_Q9;

    /* Unpack entropy table indices and predictor for current CB1 index */
    silk_NLSF_unpack( ec_ix, pred_Q8, psNLSF_CB, NLSFIndices[ 0 ] );

    /* Predictive residual dequantizer */
    silk_NLSF_residual_dequant( res_Q10, &NLSFIndices[ 1 ], pred_Q8, psNLSF_CB->quantStepSize_Q16, psNLSF_CB->order );

    /* Apply inverse square-rooted weights to first stage and add to output */
    pCB_element = &psNLSF_CB->CB1_NLSF_Q8[ NLSFIndices[ 0 ] * psNLSF_CB->order ];
    pCB_Wght_Q9 = &psNLSF_CB->CB1_Wght_Q9[ NLSFIndices[ 0 ] * psNLSF_CB->order ];
    for( i = 0; i < psNLSF_CB->order; i++ ) {
        /* NLSF = CB1 value (Q8 -> Q15) + residual / weight */
        NLSF_Q15_tmp = silk_ADD_LSHIFT32( silk_DIV32_16( silk_LSHIFT( (opus_int32)res_Q10[ i ], 14 ), pCB_Wght_Q9[ i ] ), (opus_int16)pCB_element[ i ], 7 );
        pNLSF_Q15[ i ] = (opus_int16)silk_LIMIT( NLSF_Q15_tmp, 0, 32767 );
    }

    /* NLSF stabilization: enforce minimum distances between coefficients */
    silk_NLSF_stabilize( pNLSF_Q15, psNLSF_CB->deltaMin_Q15, psNLSF_CB->order );
}
/* Processing of gains.
 * NOTE(review): this block appears truncated in this chunk — the trailing
 * `else` branch and the function's closing code are not visible here. */
void silk_process_gains_FIX(
    silk_encoder_state_FIX    *psEnc,      /* I/O  Encoder state                          */
    silk_encoder_control_FIX  *psEncCtrl,  /* I/O  Encoder control                        */
    opus_int                  condCoding   /* I    The type of conditional coding to use  */
)
{
    silk_shape_state_FIX *psShapeSt = &psEnc->sShape;
    opus_int   k;
    opus_int32 s_Q16, InvMaxSqrVal_Q16, gain, gain_squared, ResNrg, ResNrgPart, quant_offset_Q10;

    /* Gain reduction when LTP coding gain is high */
    if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
        /*s = -0.5f * silk_sigmoid( 0.25f * ( psEncCtrl->LTPredCodGain - 12.0f ) ); */
        s_Q16 = -silk_sigm_Q15( silk_RSHIFT_ROUND( psEncCtrl->LTPredCodGain_Q7 - SILK_FIX_CONST( 12.0, 7 ), 4 ) );
        for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
            psEncCtrl->Gains_Q16[ k ] = silk_SMLAWB( psEncCtrl->Gains_Q16[ k ], psEncCtrl->Gains_Q16[ k ], s_Q16 );
        }
    }

    /* Limit the quantized signal */
    /* InvMaxSqrVal = pow( 2.0f, 0.33f * ( 21.0f - SNR_dB ) ) / subfr_length; */
    InvMaxSqrVal_Q16 = silk_DIV32_16( silk_log2lin(
        silk_SMULWB( SILK_FIX_CONST( 21 + 16 / 0.33, 7 ) - psEnc->sCmn.SNR_dB_Q7, SILK_FIX_CONST( 0.33, 16 ) ) ), psEnc->sCmn.subfr_length );

    for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
        /* Soft limit on ratio residual energy and squared gains */
        ResNrg     = psEncCtrl->ResNrg[ k ];
        ResNrgPart = silk_SMULWW( ResNrg, InvMaxSqrVal_Q16 );
        if( psEncCtrl->ResNrgQ[ k ] > 0 ) {
            ResNrgPart = silk_RSHIFT_ROUND( ResNrgPart, psEncCtrl->ResNrgQ[ k ] );
        } else {
            /* Left shift with saturation to silk_int32_MAX on overflow */
            if( ResNrgPart >= silk_RSHIFT( silk_int32_MAX, -psEncCtrl->ResNrgQ[ k ] ) ) {
                ResNrgPart = silk_int32_MAX;
            } else {
                ResNrgPart = silk_LSHIFT( ResNrgPart, -psEncCtrl->ResNrgQ[ k ] );
            }
        }
        gain = psEncCtrl->Gains_Q16[ k ];
        gain_squared = silk_ADD_SAT32( ResNrgPart, silk_SMMUL( gain, gain ) );
        if( gain_squared < silk_int16_MAX ) {
            /* recalculate with higher precision */
            gain_squared = silk_SMLAWW( silk_LSHIFT( ResNrgPart, 16 ), gain, gain );
            silk_assert( gain_squared > 0 );
            gain = silk_SQRT_APPROX( gain_squared );                    /* Q8   */
            gain = silk_min( gain, silk_int32_MAX >> 8 );
            psEncCtrl->Gains_Q16[ k ] = silk_LSHIFT_SAT32( gain, 8 );   /* Q16  */
        } else {
/* Convert int32 coefficients to int16 coefs and make sure there's no wrap-around */
void silk_LPC_fit(
    opus_int16   *a_QOUT,   /* O    Output signal                       */
    opus_int32   *a_QIN,    /* I/O  Input signal                        */
    const opus_int QOUT,    /* I    Input Q domain                      */
    const opus_int QIN,     /* I    Input Q domain                      */
    const opus_int d        /* I    Filter order                        */
)
{
    opus_int    i, k, idx = 0;
    opus_int32  maxabs, absval, chirp_Q16;

    /* Limit the maximum absolute value of the prediction coefficients, so that they'll fit in int16 */
    for( i = 0; i < 10; i++ ) {
        /* Find maximum absolute value and its index */
        maxabs = 0;
        for( k = 0; k < d; k++ ) {
            absval = silk_abs( a_QIN[k] );
            if( absval > maxabs ) {
                maxabs = absval;
                idx    = k;
            }
        }
        maxabs = silk_RSHIFT_ROUND( maxabs, QIN - QOUT );

        if( maxabs > silk_int16_MAX ) {
            /* Reduce magnitude of prediction coefficients by bandwidth expansion */
            maxabs = silk_min( maxabs, 163838 );  /* ( silk_int32_MAX >> 14 ) + silk_int16_MAX = 163838 */
            chirp_Q16 = SILK_FIX_CONST( 0.999, 16 ) - silk_DIV32( silk_LSHIFT( maxabs - silk_int16_MAX, 14 ),
                                        silk_RSHIFT32( silk_MUL( maxabs, idx + 1), 2 ) );
            silk_bwexpander_32( a_QIN, d, chirp_Q16 );
        } else {
            break;
        }
    }

    if( i == 10 ) {
        /* Reached the last iteration, clip the coefficients */
        for( k = 0; k < d; k++ ) {
            a_QOUT[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( a_QIN[ k ], QIN - QOUT ) );
            /* Keep input coefficients consistent with the clipped output */
            a_QIN[ k ] = silk_LSHIFT( (opus_int32)a_QOUT[ k ], QIN - QOUT );
        }
    } else {
        for( k = 0; k < d; k++ ) {
            a_QOUT[ k ] = (opus_int16)silk_RSHIFT_ROUND( a_QIN[ k ], QIN - QOUT );
        }
    }
}
/* Reset the packet-loss-concealment state to its defaults */
void silk_PLC_Reset(
    silk_decoder_state *psDec    /* I/O Decoder state */
)
{
    /* Unity previous gains in Q16 */
    psDec->sPLC.prevGain_Q16[ 0 ] = SILK_FIX_CONST( 1, 16 );
    psDec->sPLC.prevGain_Q16[ 1 ] = SILK_FIX_CONST( 1, 16 );

    /* Initial pitch lag estimate: half the frame length, in Q8 */
    psDec->sPLC.pitchL_Q8 = silk_LSHIFT( psDec->frame_length, 8 - 1 );

    /* Default subframe layout */
    psDec->sPLC.subfr_length = 20;
    psDec->sPLC.nb_subfr     = 2;
}
/* Step up function, converts reflection coefficients to prediction coefficients */
void silk_k2a(
    opus_int32       *A_Q24,    /* O    Prediction coefficients [order] Q24     */
    const opus_int16 *rc_Q15,   /* I    Reflection coefficients [order] Q15     */
    const opus_int32 order      /* I    Prediction order                        */
)
{
    opus_int   i, j;
    opus_int32 prev[ SILK_MAX_ORDER_LPC ];

    for( i = 0; i < order; i++ ) {
        /* Snapshot the current coefficients before updating them in place */
        for( j = 0; j < i; j++ ) {
            prev[ j ] = A_Q24[ j ];
        }
        /* Levinson step: add the reflection coef times the reversed vector */
        for( j = 0; j < i; j++ ) {
            A_Q24[ j ] = silk_SMLAWB( A_Q24[ j ], silk_LSHIFT( prev[ i - j - 1 ], 1 ), rc_Q15[ i ] );
        }
        /* The new highest-order coefficient comes directly from rc, Q15 -> Q24 */
        A_Q24[ i ] = -silk_LSHIFT( (opus_int32)rc_Q15[ i ], 9 );
    }
}
/* Compute autocorrelation.
 * The zero-lag term is computed in 64 bits; the scaling that fits it into
 * 32 bits is then applied uniformly to all lags. */
void silk_autocorr(
    opus_int32        *results,          /* O    Result (length correlationCount)            */
    opus_int          *scale,            /* O    Scaling of the correlation vector           */
    const opus_int16  *inputData,        /* I    Input data to correlate                     */
    const opus_int    inputDataSize,     /* I    Length of input                             */
    const opus_int    correlationCount   /* I    Number of correlation taps to compute       */
)
{
    opus_int   i, lz, nRightShifts, corrCount;
    opus_int64 corr64;

    corrCount = silk_min_int( inputDataSize, correlationCount );

    /* compute energy (zero-lag correlation) */
    corr64 = silk_inner_prod16_aligned_64( inputData, inputData, inputDataSize );

    /* deal with all-zero input data */
    corr64 += 1;

    /* number of leading zeros */
    lz = silk_CLZ64( corr64 );

    /* scaling: number of right shifts applied to correlations */
    nRightShifts = 35 - lz;
    *scale = nRightShifts;

    if( nRightShifts <= 0 ) {
        /* Energy fits in int32; apply left shift instead */
        results[ 0 ] = silk_LSHIFT( (opus_int32)silk_CHECK_FIT32( corr64 ), -nRightShifts );

        /* compute remaining correlations based on int32 inner product */
        for( i = 1; i < corrCount; i++ ) {
            results[ i ] = silk_LSHIFT( silk_inner_prod_aligned( inputData, inputData + i, inputDataSize - i ), -nRightShifts );
        }
    } else {
        results[ 0 ] = (opus_int32)silk_CHECK_FIT32( silk_RSHIFT64( corr64, nRightShifts ) );

        /* compute remaining correlations based on int64 inner product */
        for( i = 1; i < corrCount; i++ ) {
            results[ i ] = (opus_int32)silk_CHECK_FIT32( silk_RSHIFT64( silk_inner_prod16_aligned_64( inputData, inputData + i, inputDataSize - i ), nRightShifts ) );
        }
    }
}
/* Set up the API-rate-to-internal-rate resampler, re-resampling any buffered
 * signal when the internal sample rate changes mid-stream. */
opus_int silk_setup_resamplers(
    silk_encoder_state_Fxx *psEnc,    /* I/O                       */
    opus_int               fs_kHz     /* I                         */
)
{
    opus_int   ret = SILK_NO_ERROR;
    opus_int32 nSamples_temp;

    if( psEnc->sCmn.fs_kHz != fs_kHz || psEnc->sCmn.prev_API_fs_Hz != psEnc->sCmn.API_fs_Hz ) {

        if( psEnc->sCmn.fs_kHz == 0 ) {
            /* Initialize the resampler for enc_API.c preparing resampling from API_fs_Hz to fs_kHz */
            ret += silk_resampler_init( &psEnc->sCmn.resampler_state, psEnc->sCmn.API_fs_Hz, fs_kHz * 1000, 1 );
        } else {
            /* Allocate worst case space for temporary upsampling, 8 to 48 kHz, so a factor 6 */
            opus_int16 x_buf_API_fs_Hz[ ( 2 * MAX_FRAME_LENGTH_MS + LA_SHAPE_MS ) * MAX_API_FS_KHZ ];
            silk_resampler_state_struct temp_resampler_state;
#ifdef FIXED_POINT
            opus_int16 *x_bufFIX = psEnc->x_buf;
#else
            opus_int16 x_bufFIX[ 2 * MAX_FRAME_LENGTH + LA_SHAPE_MAX ];
#endif

            nSamples_temp = silk_LSHIFT( psEnc->sCmn.frame_length, 1 ) + LA_SHAPE_MS * psEnc->sCmn.fs_kHz;

#ifndef FIXED_POINT
            silk_float2short_array( x_bufFIX, psEnc->x_buf, nSamples_temp );
#endif

            /* Initialize resampler for temporary resampling of x_buf data to API_fs_Hz */
            ret += silk_resampler_init( &temp_resampler_state, silk_SMULBB( psEnc->sCmn.fs_kHz, 1000 ), psEnc->sCmn.API_fs_Hz, 0 );

            /* Temporary resampling of x_buf data to API_fs_Hz */
            ret += silk_resampler( &temp_resampler_state, x_buf_API_fs_Hz, x_bufFIX, nSamples_temp );

            /* Calculate number of samples that has been temporarily upsampled */
            nSamples_temp = silk_DIV32_16( nSamples_temp * psEnc->sCmn.API_fs_Hz, silk_SMULBB( psEnc->sCmn.fs_kHz, 1000 ) );

            /* Initialize the resampler for enc_API.c preparing resampling from API_fs_Hz to fs_kHz */
            ret += silk_resampler_init( &psEnc->sCmn.resampler_state, psEnc->sCmn.API_fs_Hz, silk_SMULBB( fs_kHz, 1000 ), 1 );

            /* Correct resampler state by resampling buffered data from API_fs_Hz to fs_kHz */
            ret += silk_resampler( &psEnc->sCmn.resampler_state, x_bufFIX, x_buf_API_fs_Hz, nSamples_temp );

#ifndef FIXED_POINT
            silk_short2float_array( psEnc->x_buf, x_bufFIX, ( 2 * MAX_FRAME_LENGTH_MS + LA_SHAPE_MS ) * fs_kHz );
#endif
        }
    }

    psEnc->sCmn.prev_API_fs_Hz = psEnc->sCmn.API_fs_Hz;

    return ret;
}
/* helper function for NLSF2A(..): expand a set of 2*cos(LSF) roots into
 * polynomial coefficients in QA. Note the in-place updates are
 * order-sensitive: the inner loop must run from high to low index. */
static OPUS_INLINE void silk_NLSF2A_find_poly(
    opus_int32          *out,      /* O    intermediate polynomial, QA [dd+1]              */
    const opus_int32    *cLSF,     /* I    vector of interleaved 2*cos(LSFs), QA [d]       */
    opus_int            dd         /* I    polynomial order (= 1/2 * filter order)         */
)
{
    opus_int   k, n;
    opus_int32 ftmp;

    out[0] = silk_LSHIFT( 1, QA );
    out[1] = -cLSF[0];
    for( k = 1; k < dd; k++ ) {
        ftmp = cLSF[2*k];            /* QA*/
        /* Multiply in the next root pair; 64-bit intermediate to avoid overflow */
        out[k+1] = silk_LSHIFT( out[k-1], 1 ) - (opus_int32)silk_RSHIFT_ROUND64( silk_SMULL( ftmp, out[k] ), QA );
        for( n = k; n > 1; n-- ) {
            out[n] += out[n-2] - (opus_int32)silk_RSHIFT_ROUND64( silk_SMULL( ftmp, out[n-1] ), QA );
        }
        out[1] -= ftmp;
    }
}
/* Convert input to a log scale: approximately 128 * log2( inLin ).
 * The integer part comes from the position of the leading one; the
 * fractional part is refined with a piece-wise parabolic correction. */
opus_int32 silk_lin2log(
    const opus_int32 inLin    /* I  input in linear scale */
)
{
    opus_int32 lz, frac_Q7, int_part_Q7, frac_part_Q7;

    silk_CLZ_FRAC( inLin, &lz, &frac_Q7 );

    /* Floor of log2, scaled to Q7 */
    int_part_Q7 = silk_LSHIFT( 31 - lz, 7 );

    /* Piece-wise parabolic approximation of the fractional part */
    frac_part_Q7 = silk_SMLAWB( frac_Q7, silk_MUL( frac_Q7, 128 - frac_Q7 ), 179 );

    return int_part_Q7 + frac_part_Q7;
}
/* Warped LPC analysis filter: chain of first-order allpass sections with
 * warping factor lambda, producing the prediction residual in Q2. */
void silk_warped_LPC_analysis_filter_FIX(
          opus_int32            state[],                    /* I/O  State [order + 1]                   */
          opus_int32            res_Q2[],                   /* O    Residual signal [length]            */
    const opus_int16            coef_Q13[],                 /* I    Coefficients [order]                */
    const opus_int16            input[],                    /* I    Input signal [length]               */
    const opus_int16            lambda_Q16,                 /* I    Warping factor                      */
    const opus_int              length,                     /* I    Length of input signal              */
    const opus_int              order                       /* I    Filter order (even)                 */
)
{
    opus_int     n, i;
    opus_int32   acc_Q11, tmp1, tmp2;

    /* Order must be even */
    silk_assert( ( order & 1 ) == 0 );

    for( n = 0; n < length; n++ ) {
        /* Output of lowpass section */
        tmp2 = silk_SMLAWB( state[ 0 ], state[ 1 ], lambda_Q16 );
        state[ 0 ] = silk_LSHIFT( input[ n ], 14 );
        /* Output of allpass section */
        tmp1 = silk_SMLAWB( state[ 1 ], state[ 2 ] - tmp2, lambda_Q16 );
        state[ 1 ] = tmp2;
        /* Initialize accumulator with rounding offset */
        acc_Q11 = silk_RSHIFT( order, 1 );
        acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ 0 ] );
        /* Loop over allpass sections, two per iteration */
        for( i = 2; i < order; i += 2 ) {
            /* Output of allpass section */
            tmp2 = silk_SMLAWB( state[ i ], state[ i + 1 ] - tmp1, lambda_Q16 );
            state[ i ] = tmp1;
            acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ i - 1 ] );
            /* Output of allpass section */
            tmp1 = silk_SMLAWB( state[ i + 1 ], state[ i + 2 ] - tmp2, lambda_Q16 );
            state[ i + 1 ] = tmp2;
            acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ i ] );
        }
        state[ order ] = tmp1;
        acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ order - 1 ] );
        /* Residual = input (Q2) minus prediction (Q11 -> Q2) */
        res_Q2[ n ] = silk_LSHIFT( (opus_int32)input[ n ], 2 ) - silk_RSHIFT_ROUND( acc_Q11, 9 );
    }
}
/* Gain scalar quantization with hysteresis, uniform on log scale */
void silk_gains_quant(
    opus_int8          ind[ MAX_NB_SUBFR ],       /* O    gain indices                    */
    opus_int32         gain_Q16[ MAX_NB_SUBFR ],  /* I/O  gains (quantized out)           */
    opus_int8          *prev_ind,                 /* I/O  last index in previous frame    */
    const opus_int     conditional,               /* I    first gain is delta coded if 1  */
    const opus_int     nb_subfr                   /* I    number of subframes             */
)
{
    opus_int k, double_step_size_threshold;

    for( k = 0; k < nb_subfr; k++ ) {
        /* Convert to log scale, scale, floor() */
        ind[ k ] = silk_SMULWB( SCALE_Q16, silk_lin2log( gain_Q16[ k ] ) - OFFSET );

        /* Round towards previous quantized gain (hysteresis) */
        if( ind[ k ] < *prev_ind ) {
            ind[ k ]++;
        }
        ind[ k ] = silk_LIMIT_int( ind[ k ], 0, N_LEVELS_QGAIN - 1 );

        /* Compute delta indices and limit */
        if( k == 0 && conditional == 0 ) {
            /* Full index */
            ind[ k ] = silk_LIMIT_int( ind[ k ], *prev_ind + MIN_DELTA_GAIN_QUANT, N_LEVELS_QGAIN - 1 );
            *prev_ind = ind[ k ];
        } else {
            /* Delta index */
            ind[ k ] = ind[ k ] - *prev_ind;

            /* Double the quantization step size for large gain increases, so that the max gain level can be reached */
            double_step_size_threshold = 2 * MAX_DELTA_GAIN_QUANT - N_LEVELS_QGAIN + *prev_ind;
            if( ind[ k ] > double_step_size_threshold ) {
                ind[ k ] = double_step_size_threshold + silk_RSHIFT( ind[ k ] - double_step_size_threshold + 1, 1 );
            }

            ind[ k ] = silk_LIMIT_int( ind[ k ], MIN_DELTA_GAIN_QUANT, MAX_DELTA_GAIN_QUANT );

            /* Accumulate deltas */
            if( ind[ k ] > double_step_size_threshold ) {
                *prev_ind += silk_LSHIFT( ind[ k ], 1 ) - double_step_size_threshold;
                *prev_ind = silk_min_int( *prev_ind, N_LEVELS_QGAIN - 1 );
            } else {
                *prev_ind += ind[ k ];
            }

            /* Shift to make non-negative */
            ind[ k ] -= MIN_DELTA_GAIN_QUANT;
        }

        /* Scale and convert to linear scale */
        gain_Q16[ k ] = silk_log2lin( silk_min_32( silk_SMULWB( INV_SCALE_Q16, *prev_ind ) + OFFSET, 3967 ) ); /* 3967 = 31 in Q7 */
    }
}
/* Decode quantized NLSF vector (Laroia-weighting variant): first-stage
 * codebook vector plus residual scaled by inverse square-rooted weights,
 * followed by stabilization. */
void silk_NLSF_decode(
          opus_int16                *pNLSF_Q15,   /* O    Quantized NLSF vector [ LPC_ORDER ]     */
          opus_int8                 *NLSFIndices, /* I    Codebook path vector [ LPC_ORDER + 1 ]  */
    const silk_NLSF_CB_struct       *psNLSF_CB    /* I    Codebook object                         */
)
{
    opus_int         i;
    opus_uint8       pred_Q8[  MAX_LPC_ORDER ];
    opus_int16       ec_ix[    MAX_LPC_ORDER ];
    opus_int16       res_Q10[  MAX_LPC_ORDER ];
    opus_int16       W_tmp_QW[ MAX_LPC_ORDER ];
    opus_int32       W_tmp_Q9, NLSF_Q15_tmp;
    const opus_uint8 *pCB_element;

    /* Decode first stage: copy CB1 vector, Q8 -> Q15 */
    pCB_element = &psNLSF_CB->CB1_NLSF_Q8[ NLSFIndices[ 0 ] * psNLSF_CB->order ];
    for( i = 0; i < psNLSF_CB->order; i++ ) {
        pNLSF_Q15[ i ] = silk_LSHIFT( (opus_int16)pCB_element[ i ], 7 );
    }

    /* Unpack entropy table indices and predictor for current CB1 index */
    silk_NLSF_unpack( ec_ix, pred_Q8, psNLSF_CB, NLSFIndices[ 0 ] );

    /* Predictive residual dequantizer */
    silk_NLSF_residual_dequant( res_Q10, &NLSFIndices[ 1 ], pred_Q8, psNLSF_CB->quantStepSize_Q16, psNLSF_CB->order );

    /* Weights from codebook vector */
    silk_NLSF_VQ_weights_laroia( W_tmp_QW, pNLSF_Q15, psNLSF_CB->order );

    /* Apply inverse square-rooted weights and add to output */
    for( i = 0; i < psNLSF_CB->order; i++ ) {
        W_tmp_Q9 = silk_SQRT_APPROX( silk_LSHIFT( (opus_int32)W_tmp_QW[ i ], 18 - NLSF_W_Q ) );
        NLSF_Q15_tmp = silk_ADD32( pNLSF_Q15[ i ], silk_DIV32_16( silk_LSHIFT( (opus_int32)res_Q10[ i ], 14 ), W_tmp_Q9 ) );
        pNLSF_Q15[ i ] = (opus_int16)silk_LIMIT( NLSF_Q15_tmp, 0, 32767 );
    }

    /* NLSF stabilization: enforce minimum distances between coefficients */
    silk_NLSF_stabilize( pNLSF_Q15, psNLSF_CB->deltaMin_Q15, psNLSF_CB->order );
}
/* Transforms polynomials from cos(n*f) to cos(f)^n */
static inline void silk_A2NLSF_trans_poly(int32_t * p,  /* I/O    Polynomial                              */
                                          const int dd  /* I      Polynomial order (= filter order / 2 )  */
    )
{
    int i, j;

    for (i = 2; i <= dd; i++) {
        /* j must run downward: lower entries are read only after being updated */
        for (j = dd; j > i; j--) {
            p[j - 2] -= p[j];
        }
        p[i - 2] -= silk_LSHIFT(p[i], 1);
    }
}
/* Transforms polynomials from cos(n*f) to cos(f)^n */
static OPUS_INLINE void silk_A2NLSF_trans_poly(
    opus_int32          *p,                     /* I/O    Polynomial                                */
    const opus_int      dd                      /* I      Polynomial order (= filter order / 2 )    */
)
{
    opus_int k, n;

    for( k = 2; k <= dd; k++ ) {
        /* Inner loop must run from high to low index: lower entries are
           read only after they have been updated */
        for( n = dd; n > k; n-- ) {
            p[ n - 2 ] -= p[ n ];
        }
        p[ k - 2 ] -= silk_LSHIFT( p[ k ], 1 );
    }
}
static inline void silk_A2NLSF_init( const opus_int32 *a_Q16, opus_int32 *P, opus_int32 *Q, const opus_int dd ) { opus_int k; /* Convert filter coefs to even and odd polynomials */ P[dd] = silk_LSHIFT( 1, QPoly ); Q[dd] = silk_LSHIFT( 1, QPoly ); for( k = 0; k < dd; k++ ) { #if( QPoly < 16 ) P[ k ] = silk_RSHIFT_ROUND( -a_Q16[ dd - k - 1 ] - a_Q16[ dd + k ], 16 - QPoly ); /* QPoly */ Q[ k ] = silk_RSHIFT_ROUND( -a_Q16[ dd - k - 1 ] + a_Q16[ dd + k ], 16 - QPoly ); /* QPoly */ #elif( Qpoly == 16 ) P[ k ] = -a_Q16[ dd - k - 1 ] - a_Q16[ dd + k ]; /* QPoly*/ Q[ k ] = -a_Q16[ dd - k - 1 ] + a_Q16[ dd + k ]; /* QPoly*/ #else P[ k ] = silk_LSHIFT( -a_Q16[ dd - k - 1 ] - a_Q16[ dd + k ], QPoly - 16 ); /* QPoly */ Q[ k ] = silk_LSHIFT( -a_Q16[ dd - k - 1 ] + a_Q16[ dd + k ], QPoly - 16 ); /* QPoly */ #endif } /* Divide out zeros as we have that for even filter orders, */ /* z = 1 is always a root in Q, and */ /* z = -1 is always a root in P */ for( k = dd; k > 0; k-- ) { P[ k - 1 ] -= P[ k ]; Q[ k - 1 ] += Q[ k ]; } /* Transform polynomials from cos(n*f) to cos(f)^n */ silk_A2NLSF_trans_poly( P, dd ); silk_A2NLSF_trans_poly( Q, dd ); }
/* Split signal into two decimated bands using first-order allpass filters */
void silk_ana_filt_bank_1(
    const opus_int16    *in,    /* I    Input signal [N]            */
    opus_int32          *S,     /* I/O  State vector [2]            */
    opus_int16          *outL,  /* O    Low band [N/2]              */
    opus_int16          *outH,  /* O    High band [N/2]             */
    const opus_int32    N       /* I    Number of input samples     */
)
{
    opus_int    k;
    opus_int    N2 = silk_RSHIFT( N, 1 );
    opus_int32  samp_Q10, t, d, ap0, ap1;

    /* Internal variables and state are in Q10 format */
    for( k = 0; k < N2; k++ ) {
        /* Even input sample, converted to Q10 */
        samp_Q10 = silk_LSHIFT( (opus_int32)in[ 2 * k ], 10 );

        /* First all-pass section (state must be read before it is updated) */
        d      = silk_SUB32( samp_Q10, S[ 0 ] );
        t      = silk_SMLAWB( d, d, A_fb1_21 );
        ap0    = silk_ADD32( S[ 0 ], t );
        S[ 0 ] = silk_ADD32( samp_Q10, t );

        /* Odd input sample, converted to Q10 */
        samp_Q10 = silk_LSHIFT( (opus_int32)in[ 2 * k + 1 ], 10 );

        /* Second all-pass section */
        d      = silk_SUB32( samp_Q10, S[ 1 ] );
        t      = silk_SMULWB( d, A_fb1_20 );
        ap1    = silk_ADD32( S[ 1 ], t );
        S[ 1 ] = silk_ADD32( samp_Q10, t );

        /* Sum/difference of sections gives low/high band; round to Q0 and saturate */
        outL[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_ADD32( ap1, ap0 ), 11 ) );
        outH[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SUB32( ap1, ap0 ), 11 ) );
    }
}
/* Deactivate by setting psEncC->mode = 0; */
void silk_LP_variable_cutoff(
    silk_LP_state       *psLP,          /* I/O  LP filter state                     */
    opus_int16          *frame,         /* I/O  Low-pass filtered output signal     */
    const opus_int      frame_length    /* I    Frame length                        */
)
{
    opus_int32 B_Q28[ TRANSITION_NB ], A_Q28[ TRANSITION_NA ];
    opus_int32 fac_Q16 = 0;
    opus_int   idx = 0;

    silk_assert( psLP->transition_frame_no >= 0 && psLP->transition_frame_no <= TRANSITION_FRAMES );

    /* Filter disabled: nothing to do */
    if( psLP->mode == 0 ) {
        return;
    }

    /* Table index and interpolation factor, Q16 */
#if( TRANSITION_INT_STEPS == 64 )
    fac_Q16 = silk_LSHIFT( TRANSITION_FRAMES - psLP->transition_frame_no, 16 - 6 );
#else
    fac_Q16 = silk_DIV32_16( silk_LSHIFT( TRANSITION_FRAMES - psLP->transition_frame_no, 16 ), TRANSITION_FRAMES );
#endif
    idx      = silk_RSHIFT( fac_Q16, 16 );
    fac_Q16 -= silk_LSHIFT( idx, 16 );

    silk_assert( idx >= 0 );
    silk_assert( idx < TRANSITION_INT_NUM );

    /* Interpolate filter coefficients */
    silk_LP_interpolate_filter_taps( B_Q28, A_Q28, idx, fac_Q16 );

    /* Update transition frame number for next frame */
    psLP->transition_frame_no = silk_LIMIT( psLP->transition_frame_no + psLP->mode, 0, TRANSITION_FRAMES );

    /* ARMA low-pass filtering */
    silk_assert( TRANSITION_NB == 3 && TRANSITION_NA == 2 );
    silk_biquad_alt( frame, B_Q28, A_Q28, psLP->In_LP_State, frame, frame_length, 1 );
}
/* Polynomial evaluation */ static inline int32_t silk_A2NLSF_eval_poly( /* return the polynomial evaluation, in Q16 */ int32_t * p, /* I Polynomial, Q16 */ const int32_t x, /* I Evaluation point, Q12 */ const int dd /* I Order */ ) { int n; int32_t x_Q16, y32; y32 = p[dd]; /* Q16 */ x_Q16 = silk_LSHIFT(x, 4); for (n = dd - 1; n >= 0; n--) { y32 = silk_SMLAWW(p[n], y32, x_Q16); /* Q16 */ } return y32; }
/* Polynomial evaluation */
static OPUS_INLINE opus_int32 silk_A2NLSF_eval_poly(    /* return the polynomial evaluation, in Q16 */
    opus_int32          *p,     /* I    Polynomial, Q16             */
    const opus_int32    x,      /* I    Evaluation point, Q12       */
    const opus_int      dd      /* I    Order                       */
)
{
    opus_int    n;
    opus_int32  x_Q16, acc_Q16;

    /* Horner's scheme, highest coefficient first */
    x_Q16   = silk_LSHIFT( x, 4 );      /* Q12 -> Q16 */
    acc_Q16 = p[ dd ];                  /* Q16 */
    n = dd;
    while( n-- > 0 ) {
        acc_Q16 = silk_SMLAWW( p[ n ], acc_Q16, x_Q16 );    /* Q16 */
    }
    return acc_Q16;
}
/*
 * LPC analysis (MA whitening) filter:
 *   out[ix] = round( in[ix] - sum_{j=0..d-1} B[j] * in[ix-1-j] / 4096 ), saturated to int16.
 * Accumulation is done in Q12 with deliberate wrap-around (_ovflw) semantics; the
 * first d output samples are zeroed because no filter history exists for them.
 */
void silk_LPC_analysis_filter(
    opus_int16 *out,        /* O Output signal */
    const opus_int16 *in,   /* I Input signal */
    const opus_int16 *B,    /* I MA prediction coefficients, Q12 [order] */
    const opus_int32 len,   /* I Signal length */
    const opus_int32 d      /* I Filter order */
)
{
    opus_int   ix, j;
    opus_int32 out32_Q12, out32;
    const opus_int16 *in_ptr;

    /* Order must be at least 6 and even (the tail loop consumes taps in pairs),
       and cannot exceed the signal length */
    silk_assert( d >= 6 );
    silk_assert( (d & 1) == 0 );
    silk_assert( d <= len );

    for( ix = d; ix < len; ix++ ) {
        /* in_ptr points at the newest history sample in[ ix - 1 ]; taps index backwards */
        in_ptr = &in[ ix - 1 ];

        /* First 6 taps unrolled */
        out32_Q12 = silk_SMULBB( in_ptr[ 0 ], B[ 0 ] );
        /* Allowing wrap around so that two wraps can cancel each other. The rare
           cases where the result wraps around can only be triggered by invalid streams*/
        out32_Q12 = silk_SMLABB_ovflw( out32_Q12, in_ptr[ -1 ], B[ 1 ] );
        out32_Q12 = silk_SMLABB_ovflw( out32_Q12, in_ptr[ -2 ], B[ 2 ] );
        out32_Q12 = silk_SMLABB_ovflw( out32_Q12, in_ptr[ -3 ], B[ 3 ] );
        out32_Q12 = silk_SMLABB_ovflw( out32_Q12, in_ptr[ -4 ], B[ 4 ] );
        out32_Q12 = silk_SMLABB_ovflw( out32_Q12, in_ptr[ -5 ], B[ 5 ] );
        /* Remaining taps, two at a time (d is asserted even above) */
        for( j = 6; j < d; j += 2 ) {
            out32_Q12 = silk_SMLABB_ovflw( out32_Q12, in_ptr[ -j ], B[ j ] );
            out32_Q12 = silk_SMLABB_ovflw( out32_Q12, in_ptr[ -j - 1 ], B[ j + 1 ] );
        }

        /* Subtract prediction: in_ptr[ 1 ] is the current sample in[ ix ], shifted to Q12 */
        out32_Q12 = silk_SUB32_ovflw( silk_LSHIFT( (opus_int32)in_ptr[ 1 ], 12 ), out32_Q12 );

        /* Scale to Q0 */
        out32 = silk_RSHIFT_ROUND( out32_Q12, 12 );

        /* Saturate output */
        out[ ix ] = (opus_int16)silk_SAT16( out32 );
    }

    /* Set first d output samples to zero */
    silk_memset( out, 0, d * sizeof( opus_int16 ) );
}