/* Find LPC and LTP prediction coefficients for one frame.
 * Computes per-subframe inverse-gain weights, runs LTP analysis and
 * quantization for voiced frames (or zeroes LTP for unvoiced), then finds,
 * quantizes and stores the LPC/NLSF coefficients and residual energies. */
void SKP_Silk_find_pred_coefs_FIX(SKP_Silk_encoder_state_FIX * psEnc,       /* I/O encoder state */
                                  SKP_Silk_encoder_control_FIX * psEncCtrl, /* I/O encoder control */
                                  const int16_t res_pitch[]                 /* I Residual from pitch analysis */ )
{
    int i;
    int32_t WLTP[NB_SUBFR * LTP_ORDER * LTP_ORDER];  /* per-subframe LTP weight matrices */
    int32_t invGains_Q16[NB_SUBFR], local_gains_Qx[NB_SUBFR], Wght_Q15[NB_SUBFR];
    int NLSF_Q15[MAX_LPC_ORDER];
    const int16_t *x_ptr;
    int16_t *x_pre_ptr, LPC_in_pre[NB_SUBFR * MAX_LPC_ORDER + MAX_FRAME_LENGTH];
    int32_t tmp, min_gain_Q16;
#if !VARQ
    int LZ;
#endif
    int LTP_corrs_rshift[NB_SUBFR];

    /* weighting for weighted least squares: find the smallest subframe gain */
    min_gain_Q16 = int32_t_MAX >> 6;
    for (i = 0; i < NB_SUBFR; i++) {
        min_gain_Q16 = SKP_min(min_gain_Q16, psEncCtrl->Gains_Q16[i]);
    }
#if !VARQ
    LZ = SKP_Silk_CLZ32(min_gain_Q16) - 1;
    LZ = SKP_LIMIT(LZ, 0, 16);
    min_gain_Q16 = SKP_RSHIFT(min_gain_Q16, 2); /* Ensure that maximum invGains_Q16 is within range of a 16 bit int */
#endif
    for (i = 0; i < NB_SUBFR; i++) {
        /* Divide to Q16 */
        assert(psEncCtrl->Gains_Q16[i] > 0);
#if VARQ
        /* Invert and normalize gains, and ensure that maximum invGains_Q16 is within range of a 16 bit int */
        invGains_Q16[i] = SKP_DIV32_varQ(min_gain_Q16, psEncCtrl->Gains_Q16[i], 16 - 2);
#else
        invGains_Q16[i] = SKP_DIV32(SKP_LSHIFT(min_gain_Q16, LZ), SKP_RSHIFT(psEncCtrl->Gains_Q16[i], 16 - LZ));
#endif
        /* Ensure Wght_Q15 has a minimum value of 1 */
        invGains_Q16[i] = SKP_max(invGains_Q16[i], 363);
        /* Square the inverted gains */
        assert(invGains_Q16[i] == SKP_SAT16(invGains_Q16[i]));
        tmp = SKP_SMULWB(invGains_Q16[i], invGains_Q16[i]);
        Wght_Q15[i] = SKP_RSHIFT(tmp, 1);
        /* Invert the inverted and normalized gains */
        local_gains_Qx[i] = SKP_DIV32((1 << (16 + Qx)), invGains_Q16[i]);
    }
    if (psEncCtrl->sCmn.sigtype == SIG_TYPE_VOICED) {
        /**********/
        /* VOICED */
        /**********/
        /* The LTP filter must be able to reach back a full pitch lag plus half the filter order */
        assert(psEnc->sCmn.frame_length - psEnc->sCmn.predictLPCOrder >= psEncCtrl->sCmn.pitchL[0] + LTP_ORDER / 2);
        /* LTP analysis */
        SKP_Silk_find_LTP_FIX(psEncCtrl->LTPCoef_Q14, WLTP, &psEncCtrl->LTPredCodGain_Q7, res_pitch,
                              res_pitch + SKP_RSHIFT(psEnc->sCmn.frame_length, 1), psEncCtrl->sCmn.pitchL,
                              Wght_Q15, psEnc->sCmn.subfr_length, psEnc->sCmn.frame_length, LTP_corrs_rshift);
        /* Quantize LTP gain parameters */
        SKP_Silk_quant_LTP_gains_FIX(psEncCtrl->LTPCoef_Q14, psEncCtrl->sCmn.LTPIndex, &psEncCtrl->sCmn.PERIndex,
                                     WLTP, psEnc->mu_LTP_Q8, psEnc->sCmn.LTPQuantLowComplexity);
        /* Control LTP scaling */
        SKP_Silk_LTP_scale_ctrl_FIX(psEnc, psEncCtrl);
        /* Create LTP residual */
        SKP_Silk_LTP_analysis_filter_FIX(LPC_in_pre,
                                         psEnc->x_buf + psEnc->sCmn.frame_length - psEnc->sCmn.predictLPCOrder,
                                         psEncCtrl->LTPCoef_Q14, psEncCtrl->sCmn.pitchL, invGains_Q16, 16,
                                         psEnc->sCmn.subfr_length, psEnc->sCmn.predictLPCOrder);
    } else {
        /************/
        /* UNVOICED */
        /************/
        /* Create signal with prepended subframes, scaled by inverse gains */
        x_ptr = psEnc->x_buf + psEnc->sCmn.frame_length - psEnc->sCmn.predictLPCOrder;
        x_pre_ptr = LPC_in_pre;
        for (i = 0; i < NB_SUBFR; i++) {
            SKP_Silk_scale_copy_vector16(x_pre_ptr, x_ptr, invGains_Q16[i],
                                         psEnc->sCmn.subfr_length + psEnc->sCmn.predictLPCOrder);
            x_pre_ptr += psEnc->sCmn.subfr_length + psEnc->sCmn.predictLPCOrder;
            x_ptr += psEnc->sCmn.subfr_length;
        }
        /* No long-term prediction for unvoiced frames */
        SKP_memset(psEncCtrl->LTPCoef_Q14, 0, NB_SUBFR * LTP_ORDER * sizeof(int16_t));
        psEncCtrl->LTPredCodGain_Q7 = 0;
    }
    /* LPC_in_pre contains the LTP-filtered input for voiced, and the unfiltered input for unvoiced */
    TIC(FIND_LPC)
    SKP_Silk_find_LPC_FIX(NLSF_Q15, &psEncCtrl->sCmn.NLSFInterpCoef_Q2, psEnc->sPred.prev_NLSFq_Q15,
                          /* NLSF interpolation is disabled for the first frame after a reset */
                          psEnc->sCmn.useInterpolatedNLSFs * (1 - psEnc->sCmn.first_frame_after_reset),
                          psEnc->sCmn.predictLPCOrder, LPC_in_pre,
                          psEnc->sCmn.subfr_length + psEnc->sCmn.predictLPCOrder);
    TOC(FIND_LPC)
    /* Quantize LSFs */
    TIC(PROCESS_LSFS)
    SKP_Silk_process_NLSFs_FIX(psEnc, psEncCtrl, NLSF_Q15);
    TOC(PROCESS_LSFS)
    /* Calculate residual energy using quantized LPC coefficients */
    SKP_Silk_residual_energy_FIX(psEncCtrl->ResNrg, psEncCtrl->ResNrgQ, LPC_in_pre,
                                 (const int16_t(*)[])psEncCtrl->PredCoef_Q12, local_gains_Qx, Qx,
                                 psEnc->sCmn.subfr_length, psEnc->sCmn.predictLPCOrder);
    /* Copy to prediction struct for use in next frame for fluctuation reduction */
    SKP_memcpy(psEnc->sPred.prev_NLSFq_Q15, NLSF_Q15, psEnc->sCmn.predictLPCOrder * sizeof(int));
}
/* High-quality 2x upsampler. Each input sample produces two output samples:
 * the even phase runs through all-pass chain 0, the odd phase through
 * all-pass chain 1, and both pass a shared biquad notch filter just above
 * Nyquist before the Q15 output gain is applied. */
void SKP_Silk_resampler_private_up2_HQ(
    SKP_int32           *S,     /* I/O: Resampler state [ 6 ] */
    SKP_int16           *out,   /* O:   Output signal [ 2 * len ] */
    const SKP_int16     *in,    /* I:   Input signal [ len ] */
    SKP_int32           len     /* I:   Number of INPUT samples */
)
{
    SKP_int32 n;
    SKP_int32 inQ10, tmp1, tmp2, diff, scaled;

    /* Coefficient sign conventions that the all-pass updates rely on */
    SKP_assert( SKP_Silk_resampler_up2_hq_0[ 0 ] > 0 );
    SKP_assert( SKP_Silk_resampler_up2_hq_0[ 1 ] < 0 );
    SKP_assert( SKP_Silk_resampler_up2_hq_1[ 0 ] > 0 );
    SKP_assert( SKP_Silk_resampler_up2_hq_1[ 1 ] < 0 );

    /* Internal variables and state are in Q10 format */
    for( n = 0; n < len; n++ ) {
        /* Convert to Q10 */
        inQ10 = SKP_LSHIFT( (SKP_int32)in[ n ], 10 );

        /* First all-pass section for even output sample */
        diff   = SKP_SUB32( inQ10, S[ 0 ] );
        scaled = SKP_SMULWB( diff, SKP_Silk_resampler_up2_hq_0[ 0 ] );
        tmp1   = SKP_ADD32( S[ 0 ], scaled );
        S[ 0 ] = SKP_ADD32( inQ10, scaled );

        /* Second all-pass section for even output sample */
        diff   = SKP_SUB32( tmp1, S[ 1 ] );
        scaled = SKP_SMLAWB( diff, diff, SKP_Silk_resampler_up2_hq_0[ 1 ] );
        tmp2   = SKP_ADD32( S[ 1 ], scaled );
        S[ 1 ] = SKP_ADD32( tmp1, scaled );

        /* Biquad notch filter (states S[4]/S[5], even phase updates S[5]) */
        tmp2 = SKP_SMLAWB( tmp2, S[ 5 ], SKP_Silk_resampler_up2_hq_notch[ 2 ] );
        tmp2 = SKP_SMLAWB( tmp2, S[ 4 ], SKP_Silk_resampler_up2_hq_notch[ 1 ] );
        tmp1 = SKP_SMLAWB( tmp2, S[ 4 ], SKP_Silk_resampler_up2_hq_notch[ 0 ] );
        S[ 5 ] = SKP_SUB32( tmp2, S[ 5 ] );

        /* Apply gain in Q15, convert back to int16 and store to output */
        out[ 2 * n ] = (SKP_int16)SKP_SAT16( SKP_RSHIFT32(
            SKP_SMLAWB( 256, tmp1, SKP_Silk_resampler_up2_hq_notch[ 3 ] ), 9 ) );

        /* First all-pass section for odd output sample */
        diff   = SKP_SUB32( inQ10, S[ 2 ] );
        scaled = SKP_SMULWB( diff, SKP_Silk_resampler_up2_hq_1[ 0 ] );
        tmp1   = SKP_ADD32( S[ 2 ], scaled );
        S[ 2 ] = SKP_ADD32( inQ10, scaled );

        /* Second all-pass section for odd output sample */
        diff   = SKP_SUB32( tmp1, S[ 3 ] );
        scaled = SKP_SMLAWB( diff, diff, SKP_Silk_resampler_up2_hq_1[ 1 ] );
        tmp2   = SKP_ADD32( S[ 3 ], scaled );
        S[ 3 ] = SKP_ADD32( tmp1, scaled );

        /* Biquad notch filter (odd phase reads the states swapped, updates S[4]) */
        tmp2 = SKP_SMLAWB( tmp2, S[ 4 ], SKP_Silk_resampler_up2_hq_notch[ 2 ] );
        tmp2 = SKP_SMLAWB( tmp2, S[ 5 ], SKP_Silk_resampler_up2_hq_notch[ 1 ] );
        tmp1 = SKP_SMLAWB( tmp2, S[ 5 ], SKP_Silk_resampler_up2_hq_notch[ 0 ] );
        S[ 4 ] = SKP_SUB32( tmp2, S[ 4 ] );

        /* Apply gain in Q15, convert back to int16 and store to output */
        out[ 2 * n + 1 ] = (SKP_int16)SKP_SAT16( SKP_RSHIFT32(
            SKP_SMLAWB( 256, tmp1, SKP_Silk_resampler_up2_hq_notch[ 3 ] ), 9 ) );
    }
}
/* Multi-stage vector quantization of an NLSF vector.
 * Performs an M-best tree search through the codebook stages, keeping up to
 * NLSF_MSVQ_Survivors candidate paths per stage, then (optionally) picks the
 * final survivor that also minimizes weighted fluctuation relative to the
 * previous frame's quantized NLSFs. The chosen path is written to NLSFIndices
 * and the decoded (quantized) NLSFs are written back into pNLSF_Q15. */
void SKP_Silk_NLSF_MSVQ_encode_FIX(
    SKP_int                         *NLSFIndices,           /* O    Codebook path vector [ CB_STAGES ] */
    SKP_int                         *pNLSF_Q15,             /* I/O  Quantized NLSF vector [ LPC_ORDER ] */
    const SKP_Silk_NLSF_CB_struct   *psNLSF_CB,             /* I    Codebook object */
    const SKP_int                   *pNLSF_q_Q15_prev,      /* I    Prev. quantized NLSF vector [LPC_ORDER] */
    const SKP_int                   *pW_Q6,                 /* I    NLSF weight vector [ LPC_ORDER ] */
    const SKP_int                   NLSF_mu_Q15,            /* I    Rate weight for the RD optimization */
    const SKP_int                   NLSF_mu_fluc_red_Q16,   /* I    Fluctuation reduction error weight */
    const SKP_int                   NLSF_MSVQ_Survivors,    /* I    Max survivors from each stage */
    const SKP_int                   LPC_order,              /* I    LPC order */
    const SKP_int                   deactivate_fluc_red     /* I    Deactivate fluctuation reduction */
)
{
    SKP_int     i, s, k, cur_survivors = 0, prev_survivors, min_survivors, input_index, cb_index, bestIndex;
    SKP_int32   rateDistThreshold_Q18;
#if( NLSF_MSVQ_FLUCTUATION_REDUCTION == 1 )
    SKP_int32   se_Q15, wsse_Q20, bestRateDist_Q20;
#endif
    /* Scratch buffers are sized for the configured complexity mode */
#if( LOW_COMPLEXITY_ONLY == 1 )
    SKP_int32   pRateDist_Q18[  NLSF_MSVQ_TREE_SEARCH_MAX_VECTORS_EVALUATED_LC_MODE ];
    SKP_int32   pRate_Q5[       MAX_NLSF_MSVQ_SURVIVORS_LC_MODE ];
    SKP_int32   pRate_new_Q5[   MAX_NLSF_MSVQ_SURVIVORS_LC_MODE ];
    SKP_int     pTempIndices[   MAX_NLSF_MSVQ_SURVIVORS_LC_MODE ];
    SKP_int     pPath[          MAX_NLSF_MSVQ_SURVIVORS_LC_MODE * NLSF_MSVQ_MAX_CB_STAGES ];
    SKP_int     pPath_new[      MAX_NLSF_MSVQ_SURVIVORS_LC_MODE * NLSF_MSVQ_MAX_CB_STAGES ];
    SKP_int     pRes_Q15[       MAX_NLSF_MSVQ_SURVIVORS_LC_MODE * MAX_LPC_ORDER ];
    SKP_int     pRes_new_Q15[   MAX_NLSF_MSVQ_SURVIVORS_LC_MODE * MAX_LPC_ORDER ];
#else
    SKP_int32   pRateDist_Q18[  NLSF_MSVQ_TREE_SEARCH_MAX_VECTORS_EVALUATED ];
    SKP_int32   pRate_Q5[       MAX_NLSF_MSVQ_SURVIVORS ];
    SKP_int32   pRate_new_Q5[   MAX_NLSF_MSVQ_SURVIVORS ];
    SKP_int     pTempIndices[   MAX_NLSF_MSVQ_SURVIVORS ];
    SKP_int     pPath[          MAX_NLSF_MSVQ_SURVIVORS * NLSF_MSVQ_MAX_CB_STAGES ];
    SKP_int     pPath_new[      MAX_NLSF_MSVQ_SURVIVORS * NLSF_MSVQ_MAX_CB_STAGES ];
    SKP_int     pRes_Q15[       MAX_NLSF_MSVQ_SURVIVORS * MAX_LPC_ORDER ];
    SKP_int     pRes_new_Q15[   MAX_NLSF_MSVQ_SURVIVORS * MAX_LPC_ORDER ];
#endif
    const SKP_int   *pConstInt;
    SKP_int         *pInt;
    const SKP_int16 *pCB_element;
    const SKP_Silk_NLSF_CBS *pCurrentCBStage;

#ifdef USE_UNQUANTIZED_LSFS
    /* Debug mode: remember the unquantized input so it can be restored at the end */
    SKP_int NLSF_orig[ MAX_LPC_ORDER ];
    SKP_memcpy( NLSF_orig, pNLSF_Q15, LPC_order * sizeof( SKP_int ) );
#endif

    SKP_assert( NLSF_MSVQ_Survivors <= MAX_NLSF_MSVQ_SURVIVORS );
    SKP_assert( ( LOW_COMPLEXITY_ONLY == 0 ) || ( NLSF_MSVQ_Survivors <= MAX_NLSF_MSVQ_SURVIVORS_LC_MODE ) );

    /****************************************************/
    /* Tree search for the multi-stage vector quantizer */
    /****************************************************/

    /* Clear accumulated rates */
    SKP_memset( pRate_Q5, 0, NLSF_MSVQ_Survivors * sizeof( SKP_int32 ) );

    /* Copy NLSFs into residual signal vector */
    for( i = 0; i < LPC_order; i++ ) {
        pRes_Q15[ i ] = pNLSF_Q15[ i ];
    }

    /* Set first stage values */
    prev_survivors = 1;

    /* Minimum number of survivors */
    min_survivors = NLSF_MSVQ_Survivors / 2;

    /* Loop over all stages */
    for( s = 0; s < psNLSF_CB->nStages; s++ ) {

        /* Set a pointer to the current stage codebook */
        pCurrentCBStage = &psNLSF_CB->CBStages[ s ];

        /* Calculate the number of survivors in the current stage */
        cur_survivors = SKP_min_32( NLSF_MSVQ_Survivors, SKP_SMULBB( prev_survivors, pCurrentCBStage->nVectors ) );

#if( NLSF_MSVQ_FLUCTUATION_REDUCTION == 0 )
        /* Find a single best survivor in the last stage, if we */
        /* do not need candidates for fluctuation reduction      */
        if( s == psNLSF_CB->nStages - 1 ) {
            cur_survivors = 1;
        }
#endif

        /* Nearest neighbor clustering for multiple input data vectors */
        SKP_Silk_NLSF_VQ_rate_distortion_FIX( pRateDist_Q18, pCurrentCBStage, pRes_Q15, pW_Q6,
            pRate_Q5, NLSF_mu_Q15, prev_survivors, LPC_order );

        /* Sort the rate-distortion errors */
        SKP_Silk_insertion_sort_increasing( pRateDist_Q18, pTempIndices,
            prev_survivors * pCurrentCBStage->nVectors, cur_survivors );

        /* Discard survivors with rate-distortion values too far above the best one */
        /* (guard against overflow in the SMLAWB threshold computation) */
        if( pRateDist_Q18[ 0 ] < SKP_int32_MAX / MAX_NLSF_MSVQ_SURVIVORS ) {
            rateDistThreshold_Q18 = SKP_SMLAWB( pRateDist_Q18[ 0 ],
                SKP_MUL( NLSF_MSVQ_Survivors, pRateDist_Q18[ 0 ] ), SKP_FIX_CONST( NLSF_MSVQ_SURV_MAX_REL_RD, 16 ) );
            while( pRateDist_Q18[ cur_survivors - 1 ] > rateDistThreshold_Q18 && cur_survivors > min_survivors ) {
                cur_survivors--;
            }
        }

        /* Update accumulated codebook contributions for the 'cur_survivors' best codebook indices */
        for( k = 0; k < cur_survivors; k++ ) {
            if( s > 0 ) {
                /* Find the indices of the input and the codebook vector */
                if( pCurrentCBStage->nVectors == 8 ) {
                    /* Fast path: split the sorted index with shift/mask instead of a division */
                    input_index = SKP_RSHIFT( pTempIndices[ k ], 3 );
                    cb_index    = pTempIndices[ k ] & 7;
                } else {
                    input_index = SKP_DIV32_16( pTempIndices[ k ], pCurrentCBStage->nVectors );
                    cb_index    = pTempIndices[ k ] - SKP_SMULBB( input_index, pCurrentCBStage->nVectors );
                }
            } else {
                /* Find the indices of the input and the codebook vector */
                input_index = 0;
                cb_index    = pTempIndices[ k ];
            }

            /* Subtract new contribution from the previous residual vector for each of 'cur_survivors' */
            pConstInt   = &pRes_Q15[ SKP_SMULBB( input_index, LPC_order ) ];
            pCB_element = &pCurrentCBStage->CB_NLSF_Q15[ SKP_SMULBB( cb_index, LPC_order ) ];
            pInt        = &pRes_new_Q15[ SKP_SMULBB( k, LPC_order ) ];
            for( i = 0; i < LPC_order; i++ ) {
                pInt[ i ] = pConstInt[ i ] - ( SKP_int )pCB_element[ i ];
            }

            /* Update accumulated rate for stage 1 to the current */
            pRate_new_Q5[ k ] = pRate_Q5[ input_index ] + pCurrentCBStage->Rates_Q5[ cb_index ];

            /* Copy paths from previous matrix, starting with the best path */
            pConstInt = &pPath[ SKP_SMULBB( input_index, psNLSF_CB->nStages ) ];
            pInt      = &pPath_new[ SKP_SMULBB( k, psNLSF_CB->nStages ) ];
            for( i = 0; i < s; i++ ) {
                pInt[ i ] = pConstInt[ i ];
            }
            /* Write the current stage indices for the 'cur_survivors' to the best path matrix */
            pInt[ s ] = cb_index;
        }

        if( s < psNLSF_CB->nStages - 1 ) {
            /* Copy NLSF residual matrix for next stage */
            SKP_memcpy( pRes_Q15, pRes_new_Q15, SKP_SMULBB( cur_survivors, LPC_order ) * sizeof( SKP_int ) );

            /* Copy rate vector for next stage */
            SKP_memcpy( pRate_Q5, pRate_new_Q5, cur_survivors * sizeof( SKP_int32 ) );

            /* Copy best path matrix for next stage */
            SKP_memcpy( pPath, pPath_new, SKP_SMULBB( cur_survivors, psNLSF_CB->nStages ) * sizeof( SKP_int ) );
        }

        prev_survivors = cur_survivors;
    }

    /* (Preliminary) index of the best survivor, later to be decoded */
    bestIndex = 0;

#if( NLSF_MSVQ_FLUCTUATION_REDUCTION == 1 )
    /******************************/
    /* NLSF fluctuation reduction */
    /******************************/
    if( deactivate_fluc_red != 1 ) {

        /* Search among all survivors, now taking also weighted fluctuation errors into account */
        bestRateDist_Q20 = SKP_int32_MAX;
        for( s = 0; s < cur_survivors; s++ ) {
            /* Decode survivor to compare with previous quantized NLSF vector */
            SKP_Silk_NLSF_MSVQ_decode( pNLSF_Q15, psNLSF_CB, &pPath_new[ SKP_SMULBB( s, psNLSF_CB->nStages ) ], LPC_order );

            /* Compare decoded NLSF vector with the previously quantized vector */
            /* (loop is unrolled by 2; assumes LPC_order is even)               */
            wsse_Q20 = 0;
            for( i = 0; i < LPC_order; i += 2 ) {
                /* Compute weighted squared quantization error for index i */
                se_Q15 = pNLSF_Q15[ i ] - pNLSF_q_Q15_prev[ i ]; // range: [ -32767 : 32767 ]
                wsse_Q20 = SKP_SMLAWB( wsse_Q20, SKP_SMULBB( se_Q15, se_Q15 ), pW_Q6[ i ] );

                /* Compute weighted squared quantization error for index i + 1 */
                se_Q15 = pNLSF_Q15[ i + 1 ] - pNLSF_q_Q15_prev[ i + 1 ]; // range: [ -32767 : 32767 ]
                wsse_Q20 = SKP_SMLAWB( wsse_Q20, SKP_SMULBB( se_Q15, se_Q15 ), pW_Q6[ i + 1 ] );
            }
            SKP_assert( wsse_Q20 >= 0 );

            /* Add the fluctuation reduction penalty to the rate distortion error */
            wsse_Q20 = SKP_ADD_POS_SAT32( pRateDist_Q18[ s ], SKP_SMULWB( wsse_Q20, NLSF_mu_fluc_red_Q16 ) );

            /* Keep index of best survivor */
            if( wsse_Q20 < bestRateDist_Q20 ) {
                bestRateDist_Q20 = wsse_Q20;
                bestIndex = s;
            }
        }
    }
#endif

    /* Copy best path to output argument */
    SKP_memcpy( NLSFIndices, &pPath_new[ SKP_SMULBB( bestIndex, psNLSF_CB->nStages ) ], psNLSF_CB->nStages * sizeof( SKP_int ) );

    /* Decode and stabilize the best survivor */
    SKP_Silk_NLSF_MSVQ_decode( pNLSF_Q15, psNLSF_CB, NLSFIndices, LPC_order );

#ifdef USE_UNQUANTIZED_LSFS
    SKP_memcpy( pNLSF_Q15, NLSF_orig, LPC_order * sizeof( SKP_int ) );
#endif
}
/* Voice activity detection: split the input into 4 frequency bands,
 * estimate per-band energies and noise levels, derive an SNR-based speech
 * activity level (Q8), SNR in dB (Q7), per-band quality and spectral tilt. */
SKP_int SKP_Silk_VAD_GetSA_Q8(                      /* O    Return value, 0 if success      */
    SKP_Silk_VAD_state  *psSilk_VAD,                /* I/O  Silk VAD state                  */
    SKP_int             *pSA_Q8,                    /* O    Speech activity level in Q8     */
    SKP_int             *pSNR_dB_Q7,                /* O    SNR for current frame in Q7     */
    SKP_int             pQuality_Q15[ VAD_N_BANDS ],/* O    Smoothed SNR for each band      */
    SKP_int             *pTilt_Q15,                 /* O    current frame's frequency tilt  */
    const SKP_int16     pIn[],                      /* I    PCM input       [framelength]   */
    const SKP_int       framelength                 /* I    Input frame length              */
)
{
    SKP_int   SA_Q15, input_tilt;
    SKP_int32 scratch[ 3 * MAX_FRAME_LENGTH / 2 ];
    SKP_int   decimated_framelength, dec_subframe_length, dec_subframe_offset, SNR_Q7, i, b, s;
    SKP_int32 sumSquared, smooth_coef_Q16;
    SKP_int16 HPstateTmp;
    SKP_int16 X[ VAD_N_BANDS ][ MAX_FRAME_LENGTH / 2 ];
    SKP_int32 Xnrg[ VAD_N_BANDS ];
    SKP_int32 NrgToNoiseRatio_Q8[ VAD_N_BANDS ];
    SKP_int32 speech_nrg, x_tmp;
    SKP_int   ret = 0;

    /* Safety checks */
    SKP_assert( VAD_N_BANDS == 4 );
    SKP_assert( MAX_FRAME_LENGTH >= framelength );
    SKP_assert( framelength <= 512 );

    /***********************/
    /* Filter and Decimate */
    /***********************/
    /* Successive filter-bank splits; X[0] is reused in place for the low band */
    /* 0-8 kHz to 0-4 kHz and 4-8 kHz */
    SKP_Silk_ana_filt_bank_1( pIn,          &psSilk_VAD->AnaState[  0 ], &X[ 0 ][ 0 ], &X[ 3 ][ 0 ], &scratch[ 0 ], framelength );

    /* 0-4 kHz to 0-2 kHz and 2-4 kHz */
    SKP_Silk_ana_filt_bank_1( &X[ 0 ][ 0 ], &psSilk_VAD->AnaState1[ 0 ], &X[ 0 ][ 0 ], &X[ 2 ][ 0 ], &scratch[ 0 ], SKP_RSHIFT( framelength, 1 ) );

    /* 0-2 kHz to 0-1 kHz and 1-2 kHz */
    SKP_Silk_ana_filt_bank_1( &X[ 0 ][ 0 ], &psSilk_VAD->AnaState2[ 0 ], &X[ 0 ][ 0 ], &X[ 1 ][ 0 ], &scratch[ 0 ], SKP_RSHIFT( framelength, 2 ) );

    /*********************************************/
    /* HP filter on lowest band (differentiator) */
    /*********************************************/
    decimated_framelength = SKP_RSHIFT( framelength, 3 );
    X[ 0 ][ decimated_framelength - 1 ] = SKP_RSHIFT( X[ 0 ][ decimated_framelength - 1 ], 1 );
    HPstateTmp = X[ 0 ][ decimated_framelength - 1 ];
    /* Walk backwards so each sample is differentiated against its un-overwritten neighbor */
    for( i = decimated_framelength - 1; i > 0; i-- ) {
        X[ 0 ][ i - 1 ] = SKP_RSHIFT( X[ 0 ][ i - 1 ], 1 );
        X[ 0 ][ i ]    -= X[ 0 ][ i - 1 ];
    }
    X[ 0 ][ 0 ] -= psSilk_VAD->HPstate;
    psSilk_VAD->HPstate = HPstateTmp;

    /*************************************/
    /* Calculate the energy in each band */
    /*************************************/
    for( b = 0; b < VAD_N_BANDS; b++ ) {
        /* Find the decimated framelength in the non-uniformly divided bands */
        decimated_framelength = SKP_RSHIFT( framelength, SKP_min_int( VAD_N_BANDS - b, VAD_N_BANDS - 1 ) );

        /* Split length into subframe lengths */
        dec_subframe_length = SKP_RSHIFT( decimated_framelength, VAD_INTERNAL_SUBFRAMES_LOG2 );
        dec_subframe_offset = 0;

        /* Compute energy per sub-frame */
        /* initialize with summed energy of last subframe */
        Xnrg[ b ] = psSilk_VAD->XnrgSubfr[ b ];
        for( s = 0; s < VAD_INTERNAL_SUBFRAMES; s++ ) {
            sumSquared = 0;
            for( i = 0; i < dec_subframe_length; i++ ) {
                /* The energy will be less than dec_subframe_length * ( SKP_int16_MIN / 8 )^2.            */
                /* Therefore we can accumulate with no risk of overflow (unless dec_subframe_length > 128) */
                x_tmp = SKP_RSHIFT( X[ b ][ i + dec_subframe_offset ], 3 );
                sumSquared = SKP_SMLABB( sumSquared, x_tmp, x_tmp );

                /* Safety check */
                SKP_assert( sumSquared >= 0 );
            }

            /* add/saturate summed energy of current subframe */
            if( s < VAD_INTERNAL_SUBFRAMES - 1 ) {
                Xnrg[ b ] = SKP_ADD_POS_SAT32( Xnrg[ b ], sumSquared );
            } else {
                /* look-ahead subframe: only half its energy is counted this frame */
                Xnrg[ b ] = SKP_ADD_POS_SAT32( Xnrg[ b ], SKP_RSHIFT( sumSquared, 1 ) );
            }

            dec_subframe_offset += dec_subframe_length;
        }
        /* Save the last subframe's energy for use as the look-ahead next frame */
        psSilk_VAD->XnrgSubfr[ b ] = sumSquared;
    }

    /********************/
    /* Noise estimation */
    /********************/
    SKP_Silk_VAD_GetNoiseLevels( &Xnrg[ 0 ], psSilk_VAD );

    /***********************************************/
    /* Signal-plus-noise to noise ratio estimation */
    /***********************************************/
    sumSquared = 0;
    input_tilt = 0;
    for( b = 0; b < VAD_N_BANDS; b++ ) {
        speech_nrg = Xnrg[ b ] - psSilk_VAD->NL[ b ];
        if( speech_nrg > 0 ) {
            /* Divide, with sufficient resolution */
            /* (pre-shift whichever operand can be shifted without overflow) */
            if( ( Xnrg[ b ] & 0xFF800000 ) == 0 ) {
                NrgToNoiseRatio_Q8[ b ] = SKP_DIV32( SKP_LSHIFT( Xnrg[ b ], 8 ), psSilk_VAD->NL[ b ] + 1 );
            } else {
                NrgToNoiseRatio_Q8[ b ] = SKP_DIV32( Xnrg[ b ], SKP_RSHIFT( psSilk_VAD->NL[ b ], 8 ) + 1 );
            }

            /* Convert to log domain */
            SNR_Q7 = SKP_Silk_lin2log( NrgToNoiseRatio_Q8[ b ] ) - 8 * 128;

            /* Sum-of-squares */
            sumSquared = SKP_SMLABB( sumSquared, SNR_Q7, SNR_Q7 );          /* Q14 */

            /* Tilt measure */
            if( speech_nrg < ( 1 << 20 ) ) {
                /* Scale down SNR value for small subband speech energies */
                SNR_Q7 = SKP_SMULWB( SKP_LSHIFT( SKP_Silk_SQRT_APPROX( speech_nrg ), 6 ), SNR_Q7 );
            }
            input_tilt = SKP_SMLAWB( input_tilt, tiltWeights[ b ], SNR_Q7 );
        } else {
            NrgToNoiseRatio_Q8[ b ] = 256;
        }
    }

    /* Mean-of-squares */
    sumSquared = SKP_DIV32_16( sumSquared, VAD_N_BANDS );                   /* Q14 */

    /* Root-mean-square approximation, scale to dBs, and write to output pointer */
    *pSNR_dB_Q7 = ( SKP_int16 )( 3 * SKP_Silk_SQRT_APPROX( sumSquared ) ); /* Q7 */

    /*********************************/
    /* Speech Probability Estimation */
    /*********************************/
    SA_Q15 = SKP_Silk_sigm_Q15( SKP_SMULWB( VAD_SNR_FACTOR_Q16, *pSNR_dB_Q7 ) - VAD_NEGATIVE_OFFSET_Q5 );

    /**************************/
    /* Frequency Tilt Measure */
    /**************************/
    *pTilt_Q15 = SKP_LSHIFT( SKP_Silk_sigm_Q15( input_tilt ) - 16384, 1 );

    /**************************************************/
    /* Scale the sigmoid output based on power levels */
    /**************************************************/
    speech_nrg = 0;
    for( b = 0; b < VAD_N_BANDS; b++ ) {
        /* Accumulate signal-without-noise energies, higher frequency bands have more weight */
        speech_nrg += ( b + 1 ) * SKP_RSHIFT( Xnrg[ b ] - psSilk_VAD->NL[ b ], 4 );
    }

    /* Power scaling */
    if( speech_nrg <= 0 ) {
        SA_Q15 = SKP_RSHIFT( SA_Q15, 1 );
    } else if( speech_nrg < 32768 ) {
        /* square-root */
        speech_nrg = SKP_Silk_SQRT_APPROX( SKP_LSHIFT( speech_nrg, 15 ) );
        SA_Q15 = SKP_SMULWB( 32768 + speech_nrg, SA_Q15 );
    }

    /* Copy the resulting speech activity in Q8 to *pSA_Q8 */
    *pSA_Q8 = SKP_min_int( SKP_RSHIFT( SA_Q15, 7 ), SKP_uint8_MAX );

    /***********************************/
    /* Energy Level and SNR estimation */
    /***********************************/
    /* smoothing coefficient */
    smooth_coef_Q16 = SKP_SMULWB( VAD_SNR_SMOOTH_COEF_Q18, SKP_SMULWB( SA_Q15, SA_Q15 ) );
    for( b = 0; b < VAD_N_BANDS; b++ ) {
        /* compute smoothed energy-to-noise ratio per band */
        psSilk_VAD->NrgRatioSmth_Q8[ b ] = SKP_SMLAWB( psSilk_VAD->NrgRatioSmth_Q8[ b ],
            NrgToNoiseRatio_Q8[ b ] - psSilk_VAD->NrgRatioSmth_Q8[ b ], smooth_coef_Q16 );

        /* signal to noise ratio in dB per band */
        SNR_Q7 = 3 * ( SKP_Silk_lin2log( psSilk_VAD->NrgRatioSmth_Q8[b] ) - 8 * 128 );

        /* quality = sigmoid( 0.25 * ( SNR_dB - 16 ) ); */
        pQuality_Q15[ b ] = SKP_Silk_sigm_Q15( SKP_RSHIFT( SNR_Q7 - 16 * 128, 4 ) );
    }

    return( ret );
}
/* Packet loss concealment: synthesize a replacement frame from the decoder's
 * previous LPC/LTP parameters. An attenuated random excitation (taken from the
 * lowest-energy recent subframe) plus an LTP prediction drives the LPC
 * synthesis filter; gains are gradually reduced and the pitch lag slowly
 * drifts upward with every concealed subframe. */
void SKP_Silk_PLC_conceal(
    SKP_Silk_decoder_state      *psDec,             /* I/O Decoder state        */
    SKP_Silk_decoder_control    *psDecCtrl,         /* I/O Decoder control      */
    SKP_int16                   signal[],           /* O   concealed signal     */
    SKP_int                     length              /* I   length of residual   */
)
{
    SKP_int   i, j, k;
    SKP_int16 *B_Q14, exc_buf[ MAX_FRAME_LENGTH ], *exc_buf_ptr;
    SKP_int16 rand_scale_Q14;
    /* union gives 32-bit alignment for the int16 LPC coefficients on the stack */
    union {
        SKP_int16 as_int16[ MAX_LPC_ORDER ];
        SKP_int32 as_int32[ MAX_LPC_ORDER / 2 ];
    } A_Q12_tmp;
    SKP_int32 rand_seed, harm_Gain_Q15, rand_Gain_Q15;
    SKP_int   lag, idx, sLTP_buf_idx, shift1, shift2;
    SKP_int32 energy1, energy2, *rand_ptr, *pred_lag_ptr;
    SKP_int32 sig_Q10[ MAX_FRAME_LENGTH ], *sig_Q10_ptr, LPC_exc_Q10, LPC_pred_Q10, LTP_pred_Q14;
    SKP_Silk_PLC_struct *psPLC;

    psPLC = &psDec->sPLC;

    /* Update LTP buffer */
    SKP_memcpy( psDec->sLTP_Q16, &psDec->sLTP_Q16[ psDec->frame_length ], psDec->frame_length * sizeof( SKP_int32 ) );

    /* LPC concealment. Apply BWE to previous LPC */
    SKP_Silk_bwexpander( psPLC->prevLPC_Q12, psDec->LPC_order, BWE_COEF_Q16 );

    /* Find random noise component */
    /* Scale previous excitation signal (second half of the previous frame) */
    exc_buf_ptr = exc_buf;
    for( k = ( NB_SUBFR >> 1 ); k < NB_SUBFR; k++ ) {
        for( i = 0; i < psDec->subfr_length; i++ ) {
            exc_buf_ptr[ i ] = ( SKP_int16 )SKP_RSHIFT(
                SKP_SMULWW( psDec->exc_Q10[ i + k * psDec->subfr_length ], psPLC->prevGain_Q16[ k ] ), 10 );
        }
        exc_buf_ptr += psDec->subfr_length;
    }
    /* Find the subframe with lowest energy of the last two and use that as random noise generator */
    /* (cross-applied shifts make the two energies directly comparable) */
    SKP_Silk_sum_sqr_shift( &energy1, &shift1, exc_buf,                         psDec->subfr_length );
    SKP_Silk_sum_sqr_shift( &energy2, &shift2, &exc_buf[ psDec->subfr_length ], psDec->subfr_length );

    if( SKP_RSHIFT( energy1, shift2 ) < SKP_RSHIFT( energy2, shift1 ) ) {
        /* First sub-frame has lowest energy */
        rand_ptr = &psDec->exc_Q10[ SKP_max_int( 0, 3 * psDec->subfr_length - RAND_BUF_SIZE ) ];
    } else {
        /* Second sub-frame has lowest energy */
        rand_ptr = &psDec->exc_Q10[ SKP_max_int( 0, psDec->frame_length - RAND_BUF_SIZE ) ];
    }

    /* Setup Gain to random noise component */
    B_Q14          = psPLC->LTPCoef_Q14;
    rand_scale_Q14 = psPLC->randScale_Q14;

    /* Setup attenuation gains (stronger attenuation the more frames are lost) */
    harm_Gain_Q15 = HARM_ATT_Q15[ SKP_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    if( psDec->prev_sigtype == SIG_TYPE_VOICED ) {
        rand_Gain_Q15 = PLC_RAND_ATTENUATE_V_Q15[  SKP_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    } else {
        rand_Gain_Q15 = PLC_RAND_ATTENUATE_UV_Q15[ SKP_min_int( NB_ATT - 1, psDec->lossCnt ) ];
    }

    /* First Lost frame */
    if( psDec->lossCnt == 0 ) {
        rand_scale_Q14 = (1 << 14 );

        /* Reduce random noise Gain for voiced frames */
        if( psDec->prev_sigtype == SIG_TYPE_VOICED ) {
            for( i = 0; i < LTP_ORDER; i++ ) {
                rand_scale_Q14 -= B_Q14[ i ];
            }
            rand_scale_Q14 = SKP_max_16( 3277, rand_scale_Q14 ); /* 0.2 */
            rand_scale_Q14 = ( SKP_int16 )SKP_RSHIFT( SKP_SMULBB( rand_scale_Q14, psPLC->prevLTP_scale_Q14 ), 14 );
        }

        /* Reduce random noise for unvoiced frames with high LPC gain */
        if( psDec->prev_sigtype == SIG_TYPE_UNVOICED ) {
            SKP_int32 invGain_Q30, down_scale_Q30;

            SKP_Silk_LPC_inverse_pred_gain( &invGain_Q30, psPLC->prevLPC_Q12, psDec->LPC_order );

            down_scale_Q30 = SKP_min_32( SKP_RSHIFT( ( 1 << 30 ), LOG2_INV_LPC_GAIN_HIGH_THRES ), invGain_Q30 );
            down_scale_Q30 = SKP_max_32( SKP_RSHIFT( ( 1 << 30 ), LOG2_INV_LPC_GAIN_LOW_THRES ), down_scale_Q30 );
            down_scale_Q30 = SKP_LSHIFT( down_scale_Q30, LOG2_INV_LPC_GAIN_HIGH_THRES );

            rand_Gain_Q15 = SKP_RSHIFT( SKP_SMULWB( down_scale_Q30, rand_Gain_Q15 ), 14 );
        }
    }

    rand_seed    = psPLC->rand_seed;
    lag          = SKP_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
    sLTP_buf_idx = psDec->frame_length;

    /***************************/
    /* LTP synthesis filtering */
    /***************************/
    sig_Q10_ptr = sig_Q10;
    for( k = 0; k < NB_SUBFR; k++ ) {
        /* Setup pointer */
        pred_lag_ptr = &psDec->sLTP_Q16[ sLTP_buf_idx - lag + LTP_ORDER / 2 ];
        for( i = 0; i < psDec->subfr_length; i++ ) {
            rand_seed = SKP_RAND( rand_seed );
            idx = SKP_RSHIFT( rand_seed, 25 ) & RAND_BUF_MASK;

            /* Unrolled loop: 5-tap LTP prediction around the pitch lag */
            LTP_pred_Q14 = SKP_SMULWB(               pred_lag_ptr[  0 ], B_Q14[ 0 ] );
            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], B_Q14[ 1 ] );
            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], B_Q14[ 2 ] );
            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], B_Q14[ 3 ] );
            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], B_Q14[ 4 ] );
            pred_lag_ptr++;

            /* Generate LPC residual */
            LPC_exc_Q10 = SKP_LSHIFT( SKP_SMULWB( rand_ptr[ idx ], rand_scale_Q14 ), 2 ); /* Random noise part */
            LPC_exc_Q10 = SKP_ADD32( LPC_exc_Q10, SKP_RSHIFT_ROUND( LTP_pred_Q14, 4 ) );  /* Harmonic part */

            /* Update states */
            psDec->sLTP_Q16[ sLTP_buf_idx ] = SKP_LSHIFT( LPC_exc_Q10, 6 );
            sLTP_buf_idx++;

            /* Save LPC residual */
            sig_Q10_ptr[ i ] = LPC_exc_Q10;
        }
        sig_Q10_ptr += psDec->subfr_length;
        /* Gradually reduce LTP gain */
        for( j = 0; j < LTP_ORDER; j++ ) {
            B_Q14[ j ] = SKP_RSHIFT( SKP_SMULBB( harm_Gain_Q15, B_Q14[ j ] ), 15 );
        }
        /* Gradually reduce excitation gain */
        rand_scale_Q14 = SKP_RSHIFT( SKP_SMULBB( rand_scale_Q14, rand_Gain_Q15 ), 15 );

        /* Slowly increase pitch lag */
        psPLC->pitchL_Q8 += SKP_SMULWB( psPLC->pitchL_Q8, PITCH_DRIFT_FAC_Q16 );
        psPLC->pitchL_Q8 = SKP_min_32( psPLC->pitchL_Q8, SKP_LSHIFT( SKP_SMULBB( MAX_PITCH_LAG_MS, psDec->fs_kHz ), 8 ) );
        lag = SKP_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
    }

    /***************************/
    /* LPC synthesis filtering */
    /***************************/
    sig_Q10_ptr = sig_Q10;
    /* Preload LPC coeficients to array on stack. Gives small performance gain */
    SKP_memcpy( A_Q12_tmp.as_int16, psPLC->prevLPC_Q12, psDec->LPC_order * sizeof( SKP_int16 ) );
    SKP_assert( psDec->LPC_order >= 10 ); /* check that unrolling works */
    for( k = 0; k < NB_SUBFR; k++ ) {
        for( i = 0; i < psDec->subfr_length; i++ ){
            /* partly unrolled: first 10 taps explicit, remainder in a loop */
            LPC_pred_Q10 = SKP_SMULWB(               psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  1 ], A_Q12_tmp.as_int16[ 0 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  2 ], A_Q12_tmp.as_int16[ 1 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  3 ], A_Q12_tmp.as_int16[ 2 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  4 ], A_Q12_tmp.as_int16[ 3 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  5 ], A_Q12_tmp.as_int16[ 4 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  6 ], A_Q12_tmp.as_int16[ 5 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  7 ], A_Q12_tmp.as_int16[ 6 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  8 ], A_Q12_tmp.as_int16[ 7 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  9 ], A_Q12_tmp.as_int16[ 8 ] );
            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - 10 ], A_Q12_tmp.as_int16[ 9 ] );
            for( j = 10; j < psDec->LPC_order; j++ ) {
                LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - j - 1 ], A_Q12_tmp.as_int16[ j ] );
            }

            /* Add prediction to LPC residual */
            sig_Q10_ptr[ i ] = SKP_ADD32( sig_Q10_ptr[ i ], LPC_pred_Q10 );

            /* Update states */
            psDec->sLPC_Q14[ MAX_LPC_ORDER + i ] = SKP_LSHIFT( sig_Q10_ptr[ i ], 4 );
        }
        sig_Q10_ptr += psDec->subfr_length;
        /* Update LPC filter state */
        SKP_memcpy( psDec->sLPC_Q14, &psDec->sLPC_Q14[ psDec->subfr_length ], MAX_LPC_ORDER * sizeof( SKP_int32 ) );
    }

    /* Scale with Gain */
    for( i = 0; i < psDec->frame_length; i++ ) {
        signal[ i ] = ( SKP_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( SKP_SMULWW( sig_Q10[ i ], psPLC->prevGain_Q16[ NB_SUBFR - 1 ] ), 10 ) );
    }

    /**************************************/
    /* Update states                      */
    /**************************************/
    psPLC->rand_seed     = rand_seed;
    psPLC->randScale_Q14 = rand_scale_Q14;
    for( i = 0; i < NB_SUBFR; i++ ) {
        psDecCtrl->pitchL[ i ] = lag;
    }
}
/* Resample with a 2x downsampler (optional), a 2nd order AR filter followed by FIR interpolation.
 * Processes the input in batches; RESAMPLER_DOWN_ORDER_FIR filtered samples are
 * carried over in S->sFIR between batches and between calls so the FIR window
 * never reads uninitialized data. */
void SKP_Silk_resampler_private_down_FIR(
    void                            *SS,        /* I/O: Resampler state             */
    SKP_int16                       out[],      /* O:   Output signal               */
    const SKP_int16                 in[],       /* I:   Input signal                */
    SKP_int32                       inLen       /* I:   Number of input samples     */
)
{
    SKP_Silk_resampler_state_struct *S = (SKP_Silk_resampler_state_struct *)SS;
    SKP_int32 nSamplesIn, interpol_ind;
    SKP_int32 max_index_Q16, index_Q16, index_increment_Q16, res_Q6;
    SKP_int16 buf1[ RESAMPLER_MAX_BATCH_SIZE_IN / 2 ];
    SKP_int32 buf2[ RESAMPLER_MAX_BATCH_SIZE_IN + RESAMPLER_DOWN_ORDER_FIR ];
    SKP_int32 *buf_ptr;
    const SKP_int16 *interpol_ptr, *FIR_Coefs;

    /* Copy buffered samples to start of buffer */
    SKP_memcpy( buf2, S->sFIR, RESAMPLER_DOWN_ORDER_FIR * sizeof( SKP_int32 ) );

    /* The first two entries of S->Coefs are the AR2 coefficients; FIR taps follow */
    FIR_Coefs = &S->Coefs[ 2 ];

    /* Iterate over blocks of frameSizeIn input samples */
    index_increment_Q16 = S->invRatio_Q16;
    while( 1 ) {
        nSamplesIn = SKP_min( inLen, S->batchSize );

        if( S->input2x == 1 ) {
            /* Downsample 2x */
            SKP_Silk_resampler_down2( S->sDown2, buf1, in, nSamplesIn );

            nSamplesIn = SKP_RSHIFT32( nSamplesIn, 1 );

            /* Second-order AR filter (output in Q8) */
            SKP_Silk_resampler_private_AR2( S->sIIR, &buf2[ RESAMPLER_DOWN_ORDER_FIR ], buf1, S->Coefs, nSamplesIn );
        } else {
            /* Second-order AR filter (output in Q8) */
            SKP_Silk_resampler_private_AR2( S->sIIR, &buf2[ RESAMPLER_DOWN_ORDER_FIR ], in, S->Coefs, nSamplesIn );
        }

        max_index_Q16 = SKP_LSHIFT32( nSamplesIn, 16 );

        /* Interpolate filtered signal */
        if( S->FIR_Fracs == 1 ) {
            /* Single coefficient phase: exploit the FIR's even symmetry */
            for( index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 += index_increment_Q16 ) {
                /* Integer part gives pointer to buffered input */
                buf_ptr = buf2 + SKP_RSHIFT( index_Q16, 16 );

                /* Inner product */
                res_Q6 = SKP_SMULWB(         SKP_ADD32( buf_ptr[ 0 ], buf_ptr[ 11 ] ), FIR_Coefs[ 0 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 1 ], buf_ptr[ 10 ] ), FIR_Coefs[ 1 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 2 ], buf_ptr[  9 ] ), FIR_Coefs[ 2 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 3 ], buf_ptr[  8 ] ), FIR_Coefs[ 3 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 4 ], buf_ptr[  7 ] ), FIR_Coefs[ 4 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 5 ], buf_ptr[  6 ] ), FIR_Coefs[ 5 ] );

                /* Scale down, saturate and store in output array */
                *out++ = (SKP_int16)SKP_SAT16( SKP_RSHIFT_ROUND( res_Q6, 6 ) );
            }
        } else {
            /* Multiple phases: pick the interpolation coefficients per output sample */
            for( index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 += index_increment_Q16 ) {
                /* Integer part gives pointer to buffered input */
                buf_ptr = buf2 + SKP_RSHIFT( index_Q16, 16 );

                /* Fractional part gives interpolation coefficients */
                interpol_ind = SKP_SMULWB( index_Q16 & 0xFFFF, S->FIR_Fracs );

                /* Inner product */
                interpol_ptr = &FIR_Coefs[ RESAMPLER_DOWN_ORDER_FIR / 2 * interpol_ind ];
                res_Q6 = SKP_SMULWB(         buf_ptr[ 0 ], interpol_ptr[ 0 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 1 ], interpol_ptr[ 1 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 2 ], interpol_ptr[ 2 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 3 ], interpol_ptr[ 3 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 4 ], interpol_ptr[ 4 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 5 ], interpol_ptr[ 5 ] );
                /* Mirrored phase for the second half of the taps */
                interpol_ptr = &FIR_Coefs[ RESAMPLER_DOWN_ORDER_FIR / 2 * ( S->FIR_Fracs - 1 - interpol_ind ) ];
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 11 ], interpol_ptr[ 0 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 10 ], interpol_ptr[ 1 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[  9 ], interpol_ptr[ 2 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[  8 ], interpol_ptr[ 3 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[  7 ], interpol_ptr[ 4 ] );
                res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[  6 ], interpol_ptr[ 5 ] );

                /* Scale down, saturate and store in output array */
                *out++ = (SKP_int16)SKP_SAT16( SKP_RSHIFT_ROUND( res_Q6, 6 ) );
            }
        }

        /* Advance by the number of raw input samples consumed (2x if pre-downsampled) */
        in += nSamplesIn << S->input2x;
        inLen -= nSamplesIn << S->input2x;

        if( inLen > S->input2x ) {
            /* More iterations to do; copy last part of filtered signal to beginning of buffer */
            SKP_memcpy( buf2, &buf2[ nSamplesIn ], RESAMPLER_DOWN_ORDER_FIR * sizeof( SKP_int32 ) );
        } else {
            break;
        }
    }

    /* Copy last part of filtered signal to the state for the next call */
    SKP_memcpy( S->sFIR, &buf2[ nSamplesIn ], RESAMPLER_DOWN_ORDER_FIR * sizeof( SKP_int32 ) );
}