/* Compute noise shaping coefficients and initial gain values.
 *
 * Derives, per frame:
 *  - an SNR adjustment from coding quality, speech activity and signal type;
 *  - a sparseness measure (unvoiced frames only) that selects the quantizer offset type;
 *  - bandwidth-expansion factors for the analysis/synthesis noise shaping filters;
 *  - per-subframe noise shaping AR coefficients (AR1/AR2) and initial quantizer gains;
 *  - low-frequency shaping, spectral tilt, harmonic boost and harmonic shaping gain,
 *    all smoothed across subframes via the persistent shape state psEnc->sShape.
 *
 * psEnc      I/O  Encoder state FLP
 * psEncCtrl  I/O  Encoder control FLP (AR1/AR2, Gains, GainsPre, LF_*, Harm*, Tilt written here)
 * pitch_res  I    LPC residual from pitch analysis (read only in the unvoiced branch)
 * x          I    Input signal [frame_length + la_shape]
 */
void silk_noise_shape_analysis_FLP(
    silk_encoder_state_FLP          *psEnc,             /* I/O  Encoder state FLP                   */
    silk_encoder_control_FLP        *psEncCtrl,         /* I/O  Encoder control FLP                 */
    const silk_float                *pitch_res,         /* I    LPC residual from pitch analysis    */
    const silk_float                *x                  /* I    Input signal [frame_length + la_shape] */
)
{
    silk_shape_state_FLP *psShapeSt = &psEnc->sShape;
    opus_int     k, nSamples;
    silk_float   SNR_adj_dB, HarmBoost, HarmShapeGain, Tilt;
    silk_float   nrg, pre_nrg, log_energy, log_energy_prev, energy_variation;
    silk_float   delta, BWExp1, BWExp2, gain_mult, gain_add, strength, b, warping;
    silk_float   x_windowed[ SHAPE_LPC_WIN_MAX ];
    silk_float   auto_corr[ MAX_SHAPE_LPC_ORDER + 1 ];
    const silk_float *x_ptr, *pitch_res_ptr;

    /* Point to start of first LPC analysis block (la_shape samples of look-back before x) */
    x_ptr = x - psEnc->sCmn.la_shape;

    /****************/
    /* GAIN CONTROL */
    /****************/
    /* SNR_dB is stored in Q7; convert to a float dB value */
    SNR_adj_dB = psEnc->sCmn.SNR_dB_Q7 * ( 1 / 128.0f );

    /* Input quality is the average of the quality in the lowest two VAD bands (Q15 -> [0,1]) */
    psEncCtrl->input_quality = 0.5f * ( psEnc->sCmn.input_quality_bands_Q15[ 0 ] + psEnc->sCmn.input_quality_bands_Q15[ 1 ] ) * ( 1.0f / 32768.0f );

    /* Coding quality level, between 0.0 and 1.0; sigmoid centered at SNR_adj_dB == 20 dB */
    psEncCtrl->coding_quality = silk_sigmoid( 0.25f * ( SNR_adj_dB - 20.0f ) );

    if( psEnc->sCmn.useCBR == 0 ) {
        /* Reduce coding SNR during low speech activity (b in [0,1]; quadratic in inactivity) */
        b = 1.0f - psEnc->sCmn.speech_activity_Q8 * ( 1.0f / 256.0f );
        SNR_adj_dB -= BG_SNR_DECR_dB * psEncCtrl->coding_quality * ( 0.5f + 0.5f * psEncCtrl->input_quality ) * b * b;
    }

    if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
        /* Reduce gains for periodic signals (higher SNR target the stronger the LTP correlation) */
        SNR_adj_dB += HARM_SNR_INCR_dB * psEnc->LTPCorr;
    } else {
        /* For unvoiced signals and low-quality input, adjust the quality slower than SNR_dB setting */
        SNR_adj_dB += ( -0.4f * psEnc->sCmn.SNR_dB_Q7 * ( 1 / 128.0f ) + 6.0f ) * ( 1.0f - psEncCtrl->input_quality );
    }

    /*************************/
    /* SPARSENESS PROCESSING */
    /*************************/
    /* Set quantizer offset */
    if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
        /* Initially set to 0; may be overruled in process_gains(..) */
        psEnc->sCmn.indices.quantOffsetType = 0;
        psEncCtrl->sparseness = 0.0f;
    } else {
        /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */
        nSamples = 2 * psEnc->sCmn.fs_kHz;
        energy_variation = 0.0f;
        log_energy_prev = 0.0f;
        pitch_res_ptr = pitch_res;
        /* Iterate over 2 ms segments of the pitch residual; accumulate |delta log2 energy| */
        for( k = 0; k < silk_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; k++ ) {
            /* Add nSamples as a bias so log2 is well-defined for all-zero segments */
            nrg = ( silk_float )nSamples + ( silk_float )silk_energy_FLP( pitch_res_ptr, nSamples );
            log_energy = silk_log2( nrg );
            if( k > 0 ) {
                energy_variation += silk_abs_float( log_energy - log_energy_prev );
            }
            log_energy_prev = log_energy;
            pitch_res_ptr += nSamples;
        }
        /* Map energy variation to a [0,1] sparseness score */
        psEncCtrl->sparseness = silk_sigmoid( 0.4f * ( energy_variation - 5.0f ) );

        /* Set quantization offset depending on sparseness measure */
        if( psEncCtrl->sparseness > SPARSENESS_THRESHOLD_QNT_OFFSET ) {
            psEnc->sCmn.indices.quantOffsetType = 0;
        } else {
            psEnc->sCmn.indices.quantOffsetType = 1;
        }

        /* Increase coding SNR for sparse signals */
        SNR_adj_dB += SPARSE_SNR_INCR_dB * ( psEncCtrl->sparseness - 0.5f );
    }

    /*******************************/
    /* Control bandwidth expansion */
    /*******************************/
    /* More BWE for signals with high prediction gain */
    strength = FIND_PITCH_WHITE_NOISE_FRACTION * psEncCtrl->predGain;           /* between 0.0 and 1.0 */
    BWExp1 = BWExp2 = BANDWIDTH_EXPANSION / ( 1.0f + strength * strength );
    /* Spread the two expansion factors apart; less spread at high coding quality */
    delta = LOW_RATE_BANDWIDTH_EXPANSION_DELTA * ( 1.0f - 0.75f * psEncCtrl->coding_quality );
    BWExp1 -= delta;
    BWExp2 += delta;
    /* BWExp1 will be applied after BWExp2, so make it relative */
    BWExp1 /= BWExp2;

    if( psEnc->sCmn.warping_Q16 > 0 ) {
        /* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */
        warping = (silk_float)psEnc->sCmn.warping_Q16 / 65536.0f + 0.01f * psEncCtrl->coding_quality;
    } else {
        warping = 0.0f;
    }

    /********************************************/
    /* Compute noise shaping AR coefs and gains */
    /********************************************/
    for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
        /* Apply window: sine slope followed by flat part followed by cosine slope */
        opus_int shift, slope_part, flat_part;
        flat_part = psEnc->sCmn.fs_kHz * 3;                                  /* 3 ms flat center */
        slope_part = ( psEnc->sCmn.shapeWinLength - flat_part ) / 2;

        silk_apply_sine_window_FLP( x_windowed, x_ptr, 1, slope_part );      /* rising slope */
        shift = slope_part;
        silk_memcpy( x_windowed + shift, x_ptr + shift, flat_part * sizeof(silk_float) );
        shift += flat_part;
        silk_apply_sine_window_FLP( x_windowed + shift, x_ptr + shift, 2, slope_part ); /* falling slope */

        /* Update pointer: next LPC analysis block */
        x_ptr += psEnc->sCmn.subfr_length;

        if( psEnc->sCmn.warping_Q16 > 0 ) {
            /* Calculate warped auto correlation */
            silk_warped_autocorrelation_FLP( auto_corr, x_windowed, warping, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder );
        } else {
            /* Calculate regular auto correlation */
            silk_autocorrelation_FLP( auto_corr, x_windowed, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder + 1 );
        }

        /* Add white noise, as a fraction of energy (regularizes Levinson-Durbin below) */
        auto_corr[ 0 ] += auto_corr[ 0 ] * SHAPE_WHITE_NOISE_FRACTION;

        /* Convert correlations to prediction coefficients, and compute residual energy */
        nrg = silk_levinsondurbin_FLP( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], auto_corr, psEnc->sCmn.shapingLPCOrder );
        psEncCtrl->Gains[ k ] = ( silk_float )sqrt( nrg );

        if( psEnc->sCmn.warping_Q16 > 0 ) {
            /* Adjust gain for warping */
            psEncCtrl->Gains[ k ] *= warped_gain( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], warping, psEnc->sCmn.shapingLPCOrder );
        }

        /* Bandwidth expansion for synthesis filter shaping */
        silk_bwexpander_FLP( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder, BWExp2 );

        /* Compute noise shaping filter coefficients: AR1 starts as a copy of AR2 ... */
        silk_memcpy( &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ], &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder * sizeof( silk_float ) );

        /* ... then gets the (relative) bandwidth expansion for analysis filter shaping */
        silk_bwexpander_FLP( &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder, BWExp1 );

        /* Ratio of prediction gains, in energy domain */
        pre_nrg = silk_LPC_inverse_pred_gain_FLP( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder );
        nrg     = silk_LPC_inverse_pred_gain_FLP( &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder );
        psEncCtrl->GainsPre[ k ] = 1.0f - 0.7f * ( 1.0f - pre_nrg / nrg );

        /* Convert to monic warped prediction coefficients and limit absolute values (cap 3.999) */
        warped_true2monic_coefs( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ], warping, 3.999f, psEnc->sCmn.shapingLPCOrder );
    }

    /*****************/
    /* Gain tweaking */
    /*****************/
    /* Increase gains during low speech activity */
    gain_mult = (silk_float)pow( 2.0f, -0.16f * SNR_adj_dB );
    gain_add  = (silk_float)pow( 2.0f, 0.16f * MIN_QGAIN_DB );   /* floor corresponding to MIN_QGAIN_DB */
    for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
        psEncCtrl->Gains[ k ] *= gain_mult;
        psEncCtrl->Gains[ k ] += gain_add;
    }

    /* Boost pre-filter gains; more tilt compensation at higher coding quality */
    gain_mult = 1.0f + INPUT_TILT + psEncCtrl->coding_quality * HIGH_RATE_INPUT_TILT;
    for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
        psEncCtrl->GainsPre[ k ] *= gain_mult;
    }

    /************************************************/
    /* Control low-frequency shaping and noise tilt */
    /************************************************/
    /* Less low frequency shaping for noisy inputs */
    strength = LOW_FREQ_SHAPING * ( 1.0f + LOW_QUALITY_LOW_FREQ_SHAPING_DECR * ( psEnc->sCmn.input_quality_bands_Q15[ 0 ] * ( 1.0f / 32768.0f ) - 1.0f ) );
    strength *= psEnc->sCmn.speech_activity_Q8 * ( 1.0f / 256.0f );
    if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
        /* Reduce low frequencies quantization noise for periodic signals, depending on pitch lag */
        /*f = 400; freqz([1, -0.98 + 2e-4 * f], [1, -0.97 + 7e-4 * f], 2^12, Fs); axis([0, 1000, -10, 1])*/
        for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
            /* b: pole/zero spacing, tighter for longer pitch lags and higher sample rates */
            b = 0.2f / psEnc->sCmn.fs_kHz + 3.0f / psEncCtrl->pitchL[ k ];
            psEncCtrl->LF_MA_shp[ k ] = -1.0f + b;
            psEncCtrl->LF_AR_shp[ k ] = 1.0f - b - b * strength;
        }
        Tilt = - HP_NOISE_COEF - (1 - HP_NOISE_COEF) * HARM_HP_NOISE_COEF * psEnc->sCmn.speech_activity_Q8 * ( 1.0f / 256.0f );
    } else {
        /* Unvoiced: same shaping for all subframes, computed once for subframe 0 */
        b = 1.3f / psEnc->sCmn.fs_kHz;
        psEncCtrl->LF_MA_shp[ 0 ] = -1.0f + b;
        psEncCtrl->LF_AR_shp[ 0 ] = 1.0f - b - b * strength * 0.6f;
        for( k = 1; k < psEnc->sCmn.nb_subfr; k++ ) {
            psEncCtrl->LF_MA_shp[ k ] = psEncCtrl->LF_MA_shp[ 0 ];
            psEncCtrl->LF_AR_shp[ k ] = psEncCtrl->LF_AR_shp[ 0 ];
        }
        Tilt = -HP_NOISE_COEF;
    }

    /****************************/
    /* HARMONIC SHAPING CONTROL */
    /****************************/
    /* Control boosting of harmonic frequencies */
    HarmBoost = LOW_RATE_HARMONIC_BOOST * ( 1.0f - psEncCtrl->coding_quality ) * psEnc->LTPCorr;

    /* More harmonic boost for noisy input signals */
    HarmBoost += LOW_INPUT_QUALITY_HARMONIC_BOOST * ( 1.0f - psEncCtrl->input_quality );

    if( USE_HARM_SHAPING && psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
        /* Harmonic noise shaping */
        HarmShapeGain = HARMONIC_SHAPING;

        /* More harmonic noise shaping for high bitrates or noisy input */
        HarmShapeGain += HIGH_RATE_OR_LOW_QUALITY_HARMONIC_SHAPING * ( 1.0f - ( 1.0f - psEncCtrl->coding_quality ) * psEncCtrl->input_quality );

        /* Less harmonic noise shaping for less periodic signals */
        HarmShapeGain *= ( silk_float )sqrt( psEnc->LTPCorr );
    } else {
        HarmShapeGain = 0.0f;
    }

    /*************************/
    /* Smooth over subframes */
    /*************************/
    /* First-order smoothers carried across frames in psShapeSt */
    for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
        psShapeSt->HarmBoost_smth += SUBFR_SMTH_COEF * ( HarmBoost - psShapeSt->HarmBoost_smth );
        psEncCtrl->HarmBoost[ k ] = psShapeSt->HarmBoost_smth;
        psShapeSt->HarmShapeGain_smth += SUBFR_SMTH_COEF * ( HarmShapeGain - psShapeSt->HarmShapeGain_smth );
        psEncCtrl->HarmShapeGain[ k ] = psShapeSt->HarmShapeGain_smth;
        psShapeSt->Tilt_smth += SUBFR_SMTH_COEF * ( Tilt - psShapeSt->Tilt_smth );
        psEncCtrl->Tilt[ k ] = psShapeSt->Tilt_smth;
    }
}
/* Processing of gains.
 *
 * Post-processes the per-subframe quantizer gains computed by the noise shape
 * analysis: attenuates them when LTP prediction is strong, soft-limits them
 * against the residual energy, quantizes them (updating the shape state's
 * last gain index), and finally derives the quantizer offset and the
 * rate/distortion tradeoff parameter Lambda.
 *
 * psEnc       I/O  Encoder state FLP
 * psEncCtrl   I/O  Encoder control FLP (Gains, GainsUnq_Q16, Lambda written here)
 * condCoding  I    The type of conditional coding to use
 */
void silk_process_gains_FLP(
    silk_encoder_state_FLP          *psEnc,                             /* I/O  Encoder state FLP       */
    silk_encoder_control_FLP        *psEncCtrl,                         /* I/O  Encoder control FLP     */
    opus_int                        condCoding                          /* I    Conditional coding type */
)
{
    silk_shape_state_FLP *psShapeSt = &psEnc->sShape;
    opus_int    k;
    opus_int32  pGains_Q16[ MAX_NB_SUBFR ];
    silk_float  s, InvMaxSqrVal, gain, quant_offset;

    /* Gain reduction when LTP coding gain is high: sigmoid centered at 12 dB,
       scaling each gain by a factor in (0.5, 1.0) */
    if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
        s = 1.0f - 0.5f * silk_sigmoid( 0.25f * ( psEncCtrl->LTPredCodGain - 12.0f ) );
        for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
            psEncCtrl->Gains[ k ] *= s;
        }
    }

    /* Limit the quantized signal: per-sample bound derived from the SNR setting */
    InvMaxSqrVal = ( silk_float )( pow( 2.0f, 0.33f * ( 21.0f - psEnc->sCmn.SNR_dB_Q7 * ( 1 / 128.0f ) ) ) / psEnc->sCmn.subfr_length );

    for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
        /* Soft limit on ratio residual energy and squared gains, then hard cap at 32767 */
        gain = psEncCtrl->Gains[ k ];
        gain = ( silk_float )sqrt( gain * gain + psEncCtrl->ResNrg[ k ] * InvMaxSqrVal );
        psEncCtrl->Gains[ k ] = silk_min_float( gain, 32767.0f );
    }

    /* Prepare gains for noise shaping quantization (Q0 float -> Q16 integer) */
    for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
        pGains_Q16[ k ] = ( opus_int32 )( psEncCtrl->Gains[ k ] * 65536.0f );
    }

    /* Save unquantized gains and gain Index */
    silk_memcpy( psEncCtrl->GainsUnq_Q16, pGains_Q16, psEnc->sCmn.nb_subfr * sizeof( opus_int32 ) );
    psEncCtrl->lastGainIndexPrev = psShapeSt->LastGainIndex;

    /* Quantize gains */
    silk_gains_quant( psEnc->sCmn.indices.GainsIndices, pGains_Q16,
        &psShapeSt->LastGainIndex, condCoding == CODE_CONDITIONALLY, psEnc->sCmn.nb_subfr );

    /* Overwrite unquantized gains with quantized gains and convert back to Q0 from Q16 */
    for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
        psEncCtrl->Gains[ k ] = pGains_Q16[ k ] / 65536.0f;
    }

    /* Set quantizer offset for voiced signals.
       Larger offset when LTP coding gain is low or tilt is high (ie low-pass) */
    if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
        psEnc->sCmn.indices.quantOffsetType =
            ( psEncCtrl->LTPredCodGain + psEnc->sCmn.input_tilt_Q15 * ( 1.0f / 32768.0f ) > 1.0f ) ? 0 : 1;
    }

    /* Quantizer boundary adjustment: look up the Q10 offset and normalize */
    quant_offset = silk_Quantization_Offsets_Q10[ psEnc->sCmn.indices.signalType >> 1 ][ psEnc->sCmn.indices.quantOffsetType ] / 1024.0f;

    /* Rate/distortion tradeoff; grows with delayed decisions, speech activity,
       input/coding quality, and the quantizer offset */
    psEncCtrl->Lambda = LAMBDA_OFFSET
                      + LAMBDA_DELAYED_DECISIONS * psEnc->sCmn.nStatesDelayedDecision
                      + LAMBDA_SPEECH_ACT        * psEnc->sCmn.speech_activity_Q8 * ( 1.0f / 256.0f )
                      + LAMBDA_INPUT_QUALITY     * psEncCtrl->input_quality
                      + LAMBDA_CODING_QUALITY    * psEncCtrl->coding_quality
                      + LAMBDA_QUANT_OFFSET      * quant_offset;

    silk_assert( psEncCtrl->Lambda > 0.0f );
    silk_assert( psEncCtrl->Lambda < 2.0f );
}