Example #1
/* Glues concealed frames with new good received frames            */
void SKP_Silk_PLC_glue_frames(
    SKP_Silk_decoder_state      *psDec,             /* I/O decoder state    */
    SKP_Silk_decoder_control    *psDecCtrl,         /* I/O Decoder control  */
    SKP_int16                   signal[],           /* I/O signal           */
    SKP_int                     length              /* I length of residual */
)
{
    SKP_int   i, energy_shift;
    SKP_int32 energy;
    SKP_Silk_PLC_struct *psPLC;
    psPLC = &psDec->sPLC;

    if( psDec->lossCnt ) {
        /* Calculate energy in concealed residual */
        SKP_Silk_sum_sqr_shift( &psPLC->conc_energy, &psPLC->conc_energy_shift, signal, length );
        
        psPLC->last_frame_lost = 1;
    } else {
        if( psDec->sPLC.last_frame_lost ) {
            /* Calculate residual in decoded signal if last frame was lost */
            SKP_Silk_sum_sqr_shift( &energy, &energy_shift, signal, length );

            /* Normalize energies */
            if( energy_shift > psPLC->conc_energy_shift ) {
                psPLC->conc_energy = SKP_RSHIFT( psPLC->conc_energy, energy_shift - psPLC->conc_energy_shift );
            } else if( energy_shift < psPLC->conc_energy_shift ) {
                energy = SKP_RSHIFT( energy, psPLC->conc_energy_shift - energy_shift );
            }

            /* Fade in the energy difference */
            if( energy > psPLC->conc_energy ) {
                SKP_int32 frac_Q24, LZ;
                SKP_int32 gain_Q12, slope_Q12;

                LZ = SKP_Silk_CLZ32( psPLC->conc_energy );
                LZ = LZ - 1;
                psPLC->conc_energy = SKP_LSHIFT( psPLC->conc_energy, LZ );
                energy = SKP_RSHIFT( energy, SKP_max_32( 24 - LZ, 0 ) );
                
                frac_Q24 = SKP_DIV32( psPLC->conc_energy, SKP_max( energy, 1 ) );
                
                gain_Q12 = SKP_Silk_SQRT_APPROX( frac_Q24 );
                slope_Q12 = SKP_DIV32_16( ( 1 << 12 ) - gain_Q12, length );

                for( i = 0; i < length; i++ ) {
                    signal[ i ] = SKP_RSHIFT( SKP_MUL( gain_Q12, signal[ i ] ), 12 );
                    gain_Q12 += slope_Q12;
                    gain_Q12 = SKP_min( gain_Q12, ( 1 << 12 ) );
                }
            }
        }
        psPLC->last_frame_lost = 0;

    }
}
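The interesting step above is the energy-matched fade-in: when the first good frame is louder than the concealed one, its start is attenuated by roughly sqrt( conc_energy / energy ), expressed as a Q12 gain, and the gain is ramped linearly back to unity over the frame. A minimal standalone sketch of that ramp follows; it is not part of the SILK sources, the helper name fade_in_q12 is made up, and it uses floating-point sqrt() instead of the fixed-point SQRT_APPROX and shift bookkeeping.

#include <math.h>
#include <stdint.h>

static void fade_in_q12(int16_t *sig, int len, double conc_energy, double energy)
{
    int32_t gain_Q12, slope_Q12;
    int     i;

    if (len <= 0 || energy <= conc_energy) {
        return;                                     /* new frame is not louder: nothing to do */
    }
    gain_Q12  = (int32_t)(sqrt(conc_energy / energy) * 4096.0 + 0.5);
    slope_Q12 = ((1 << 12) - gain_Q12) / len;       /* linear ramp back to 1.0 (4096 in Q12) */
    for (i = 0; i < len; i++) {
        sig[i] = (int16_t)((gain_Q12 * sig[i]) >> 12);  /* apply current Q12 gain */
        gain_Q12 += slope_Q12;
        if (gain_Q12 > (1 << 12)) {
            gain_Q12 = 1 << 12;
        }
    }
}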
Example #2
/* Uses SMLAWB(), requiring ARMv5E and higher.                         */
SKP_int32 SKP_Silk_schur(                     /* O:    Returns residual energy                     */
    SKP_int16            *rc_Q15,               /* O:    reflection coefficients [order] Q15         */
    const SKP_int32      *c,                    /* I:    correlations [order+1]                      */
    const SKP_int32      order                  /* I:    prediction order                            */
)
{
    SKP_int        k, n, lz;
    SKP_int32    C[ SKP_Silk_MAX_ORDER_LPC + 1 ][ 2 ];
    SKP_int32    Ctmp1, Ctmp2, rc_tmp_Q15;

    /* Get number of leading zeros */
    lz = SKP_Silk_CLZ32( c[ 0 ] );

    /* Copy correlations and adjust level to Q30 */
    if( lz < 2 ) {
        /* lz must be 1, so shift one to the right */
        for( k = 0; k < order + 1; k++ ) {
            C[ k ][ 0 ] = C[ k ][ 1 ] = SKP_RSHIFT( c[ k ], 1 );
        }
    } else if( lz > 2 ) {
        /* Shift to the left */
        lz -= 2; 
        for( k = 0; k < order + 1; k++ ) {
            C[ k ][ 0 ] = C[ k ][ 1 ] = SKP_LSHIFT( c[k], lz );
        }
    } else {
        /* No need to shift */
        for( k = 0; k < order + 1; k++ ) {
            C[ k ][ 0 ] = C[ k ][ 1 ] = c[ k ];
        }
    }

    for( k = 0; k < order; k++ ) {
        
        /* Get reflection coefficient */
        rc_tmp_Q15 = -SKP_DIV32_16( C[ k + 1 ][ 0 ], SKP_max_32( SKP_RSHIFT( C[ 0 ][ 1 ], 15 ), 1 ) );

        /* Clip (shouldn't happen for properly conditioned inputs) */
        rc_tmp_Q15 = SKP_SAT16( rc_tmp_Q15 );

        /* Store */
        rc_Q15[ k ] = (SKP_int16)rc_tmp_Q15;

        /* Update correlations */
        for( n = 0; n < order - k; n++ ) {
            Ctmp1 = C[ n + k + 1 ][ 0 ];
            Ctmp2 = C[ n ][ 1 ];
            C[ n + k + 1 ][ 0 ] = SKP_SMLAWB( Ctmp1, SKP_LSHIFT( Ctmp2, 1 ), rc_tmp_Q15 );
            C[ n ][ 1 ]         = SKP_SMLAWB( Ctmp2, SKP_LSHIFT( Ctmp1, 1 ), rc_tmp_Q15 );
        }
    }

    /* return residual energy */
    return C[ 0 ][ 1 ];
}
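For reference, the same Schur recursion in plain floating point (illustrative only: schur_float and ORDER_MAX are made-up names, c[0] is assumed positive, and the Q30 scaling, SMLAWB updates, and 16-bit clipping of the fixed-point routine are omitted):

#define ORDER_MAX 16                            /* assumed bound, cf. SKP_Silk_MAX_ORDER_LPC */

static double schur_float(double rc[], const double c[], int order)
{
    double C[ORDER_MAX + 1][2];
    int    k, n;

    for (k = 0; k <= order; k++) {
        C[k][0] = C[k][1] = c[k];               /* two working copies of the correlations */
    }
    for (k = 0; k < order; k++) {
        double r = -C[k + 1][0] / C[0][1];      /* reflection coefficient */
        rc[k] = r;
        for (n = 0; n < order - k; n++) {       /* update both correlation rows */
            double t1 = C[n + k + 1][0];
            double t2 = C[n][1];
            C[n + k + 1][0] = t1 + r * t2;
            C[n][1]         = t2 + r * t1;
        }
    }
    return C[0][1];                             /* residual energy */
}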
/* Determine length of bitstream */
SKP_int SKP_Silk_range_coder_get_length(                /* O    returns number of BITS in stream            */
    const SKP_Silk_range_coder_state    *psRC,          /* I    compressed data structure                   */
    SKP_int                             *nBytes         /* O    number of BYTES in stream                   */
)
{
    SKP_int nBits;

    /* Number of bits in stream */
    nBits = SKP_LSHIFT( psRC->bufferIx, 3 ) + SKP_Silk_CLZ32( psRC->range_Q16 - 1 ) - 14;

    *nBytes = SKP_RSHIFT( nBits + 7, 3 );

    /* Return number of bits in bitstream */
    return nBits;
}
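The byte count written to nBytes is simply the bit count rounded up to whole bytes; SKP_RSHIFT( nBits + 7, 3 ) is a branch-free ceiling division by 8, as in this small sketch (bits_to_bytes is a hypothetical helper, not SILK API):

static int bits_to_bytes(int nBits)
{
    return (nBits + 7) >> 3;    /* equals ceil(nBits / 8) for nBits >= 0 */
}

/* e.g. bits_to_bytes(16) == 2, bits_to_bytes(17) == 3 */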
Example #4
/* Calculates correlation matrix X'*X */
void SKP_Silk_corrMatrix_FIX(
    const SKP_int16                 *x,         /* I    x vector [L + order - 1] used to form data matrix X */
    const SKP_int                   L,          /* I    Length of vectors                                   */
    const SKP_int                   order,      /* I    Max lag for correlation                             */
    const SKP_int                   head_room,  /* I    Desired headroom                                    */
    SKP_int32                       *XX,        /* O    Pointer to X'*X correlation matrix [ order x order ]*/
    SKP_int                         *rshifts    /* I/O  Right shifts of correlations                        */
)
{
    SKP_int         i, j, lag, rshifts_local, head_room_rshifts;
    SKP_int32       energy;
    const SKP_int16 *ptr1, *ptr2;

    /* Calculate energy to find shift used to fit in 32 bits */
    SKP_Silk_sum_sqr_shift( &energy, &rshifts_local, x, L + order - 1 );

    /* Add shifts to get the desired head room */
    head_room_rshifts = SKP_max( head_room - SKP_Silk_CLZ32( energy ), 0 );
    
    energy = SKP_RSHIFT32( energy, head_room_rshifts );
    rshifts_local += head_room_rshifts;

    /* Calculate energy of first column (0) of X: X[:,0]'*X[:,0] */
    /* Remove contribution of first order - 1 samples */
    for( i = 0; i < order - 1; i++ ) {
        energy -= SKP_RSHIFT32( SKP_SMULBB( x[ i ], x[ i ] ), rshifts_local );
    }
    if( rshifts_local < *rshifts ) {
        /* Adjust energy */
        energy = SKP_RSHIFT32( energy, *rshifts - rshifts_local );
        rshifts_local = *rshifts;
    }

    /* Calculate energy of remaining columns of X: X[:,j]'*X[:,j] */
    /* Fill out the diagonal of the correlation matrix */
    matrix_ptr( XX, 0, 0, order ) = energy;
    ptr1 = &x[ order - 1 ]; /* First sample of column 0 of X */
    for( j = 1; j < order; j++ ) {
        energy = SKP_SUB32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ L - j ], ptr1[ L - j ] ), rshifts_local ) );
        energy = SKP_ADD32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ -j ], ptr1[ -j ] ), rshifts_local ) );
        matrix_ptr( XX, j, j, order ) = energy;
    }

    ptr2 = &x[ order - 2 ]; /* First sample of column 1 of X */
    /* Calculate the remaining elements of the correlation matrix */
    if( rshifts_local > 0 ) {
        /* Right shifting used */
        for( lag = 1; lag < order; lag++ ) {
            /* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */
            energy = 0;
            for( i = 0; i < L; i++ ) {
                energy += SKP_RSHIFT32( SKP_SMULBB( ptr1[ i ], ptr2[i] ), rshifts_local );
            }
            /* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
            matrix_ptr( XX, lag, 0, order ) = energy;
            matrix_ptr( XX, 0, lag, order ) = energy;
            for( j = 1; j < ( order - lag ); j++ ) {
                energy = SKP_SUB32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ), rshifts_local ) );
                energy = SKP_ADD32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ -j ], ptr2[ -j ] ), rshifts_local ) );
                matrix_ptr( XX, lag + j, j, order ) = energy;
                matrix_ptr( XX, j, lag + j, order ) = energy;
            }
            ptr2--; /* Update pointer to first sample of next column (lag) in X */
        }
    } else {
        for( lag = 1; lag < order; lag++ ) {
            /* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */
            energy = SKP_Silk_inner_prod_aligned( ptr1, ptr2, L );
            matrix_ptr( XX, lag, 0, order ) = energy;
            matrix_ptr( XX, 0, lag, order ) = energy;
            /* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
            for( j = 1; j < ( order - lag ); j++ ) {
                energy = SKP_SUB32( energy, SKP_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ) );
                energy = SKP_SMLABB( energy, ptr1[ -j ], ptr2[ -j ] );
                matrix_ptr( XX, lag + j, j, order ) = energy;
                matrix_ptr( XX, j, lag + j, order ) = energy;
            }
            ptr2--;/* Update pointer to first sample of next column (lag) in X */
        }
    }
    *rshifts = rshifts_local;
}
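As a cross-check of the pointer arithmetic above: column j of the data matrix X starts at x[ order - 1 - j ] and is L samples long, so XX[ i ][ j ] is the sum over n of x[ order - 1 - i + n ] * x[ order - 1 - j + n ], and the matrix is symmetric. A brute-force double-precision reference is sketched below (illustrative only; corr_matrix_ref is a made-up name and the headroom/right-shift handling is ignored):

#include <stdint.h>

static void corr_matrix_ref(const int16_t *x, int L, int order, double *xx)
{
    int i, j, n;

    for (i = 0; i < order; i++) {
        for (j = 0; j < order; j++) {
            double acc = 0.0;
            for (n = 0; n < L; n++) {
                acc += (double)x[order - 1 - i + n] * (double)x[order - 1 - j + n];
            }
            xx[i * order + j] = acc;            /* symmetric: xx[i][j] == xx[j][i] */
        }
    }
}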
Example #5
/* Compute reflection coefficients from input signal */
void SKP_Silk_burg_modified(
    SKP_int32       *res_nrg,           /* O    residual energy                                                 */
    SKP_int         *res_nrg_Q,         /* O    residual energy Q value                                         */
    SKP_int32       A_Q16[],            /* O    prediction coefficients (length order)                          */
    const SKP_int16 x[],                /* I    input signal, length: nb_subfr * ( D + subfr_length )           */
    const SKP_int   subfr_length,       /* I    input signal subframe length (including D preceding samples)    */
    const SKP_int   nb_subfr,           /* I    number of subframes stacked in x                                */
    const SKP_int32 WhiteNoiseFrac_Q32, /* I    fraction added to zero-lag autocorrelation                      */
    const SKP_int   D                   /* I    order                                                           */
)
{
    SKP_int         k, n, s, lz, rshifts, rshifts_extra;
    SKP_int32       C0, num, nrg, rc_Q31, Atmp_QA, Atmp1, tmp1, tmp2, x1, x2;
    const SKP_int16 *x_ptr;

    SKP_int32       C_first_row[ SKP_Silk_MAX_ORDER_LPC ];
    SKP_int32       C_last_row[  SKP_Silk_MAX_ORDER_LPC ];
    SKP_int32       Af_QA[       SKP_Silk_MAX_ORDER_LPC ];

    SKP_int32       CAf[ SKP_Silk_MAX_ORDER_LPC + 1 ];
    SKP_int32       CAb[ SKP_Silk_MAX_ORDER_LPC + 1 ];

    SKP_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE );
    SKP_assert( nb_subfr <= MAX_NB_SUBFR );


    /* Compute autocorrelations, added over subframes */
    SKP_Silk_sum_sqr_shift( &C0, &rshifts, x, nb_subfr * subfr_length );
    if( rshifts > MAX_RSHIFTS ) {
        C0 = SKP_LSHIFT32( C0, rshifts - MAX_RSHIFTS );
        SKP_assert( C0 > 0 );
        rshifts = MAX_RSHIFTS;
    } else {
        lz = SKP_Silk_CLZ32( C0 ) - 1;
        rshifts_extra = N_BITS_HEAD_ROOM - lz;
        if( rshifts_extra > 0 ) {
            rshifts_extra = SKP_min( rshifts_extra, MAX_RSHIFTS - rshifts );
            C0 = SKP_RSHIFT32( C0, rshifts_extra );
        } else {
            rshifts_extra = SKP_max( rshifts_extra, MIN_RSHIFTS - rshifts );
            C0 = SKP_LSHIFT32( C0, -rshifts_extra );
        }
        rshifts += rshifts_extra;
    }
    SKP_memset( C_first_row, 0, SKP_Silk_MAX_ORDER_LPC * sizeof( SKP_int32 ) );
    if( rshifts > 0 ) {
        for( s = 0; s < nb_subfr; s++ ) {
            x_ptr = x + s * subfr_length;
            for( n = 1; n < D + 1; n++ ) {
                C_first_row[ n - 1 ] += (SKP_int32)SKP_RSHIFT64( 
                    SKP_Silk_inner_prod16_aligned_64( x_ptr, x_ptr + n, subfr_length - n ), rshifts );
            }
        }
    } else {
        for( s = 0; s < nb_subfr; s++ ) {
            x_ptr = x + s * subfr_length;
            for( n = 1; n < D + 1; n++ ) {
                C_first_row[ n - 1 ] += SKP_LSHIFT32( 
                    SKP_Silk_inner_prod_aligned( x_ptr, x_ptr + n, subfr_length - n ), -rshifts );
            }
        }
    }
    SKP_memcpy( C_last_row, C_first_row, SKP_Silk_MAX_ORDER_LPC * sizeof( SKP_int32 ) );
    
    /* Initialize */
    CAb[ 0 ] = CAf[ 0 ] = C0 + SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ) + 1;         // Q(-rshifts)

    for( n = 0; n < D; n++ ) {
        /* Update first row of correlation matrix (without first element) */
        /* Update last row of correlation matrix (without last element, stored in reversed order) */
        /* Update C * Af */
        /* Update C * flipud(Af) (stored in reversed order) */
        if( rshifts > -2 ) {
            for( s = 0; s < nb_subfr; s++ ) {
                x_ptr = x + s * subfr_length;
                x1  = -SKP_LSHIFT32( (SKP_int32)x_ptr[ n ],                    16 - rshifts );      // Q(16-rshifts)
                x2  = -SKP_LSHIFT32( (SKP_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts );      // Q(16-rshifts)
                tmp1 = SKP_LSHIFT32( (SKP_int32)x_ptr[ n ],                    QA - 16 );           // Q(QA-16)
                tmp2 = SKP_LSHIFT32( (SKP_int32)x_ptr[ subfr_length - n - 1 ], QA - 16 );           // Q(QA-16)
                for( k = 0; k < n; k++ ) {
                    C_first_row[ k ] = SKP_SMLAWB( C_first_row[ k ], x1, x_ptr[ n - k - 1 ]            ); // Q( -rshifts )
                    C_last_row[ k ]  = SKP_SMLAWB( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] ); // Q( -rshifts )
                    Atmp_QA = Af_QA[ k ];
                    tmp1 = SKP_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ]            );              // Q(QA-16)
                    tmp2 = SKP_SMLAWB( tmp2, Atmp_QA, x_ptr[ subfr_length - n + k ] );              // Q(QA-16)
                }
                tmp1 = SKP_LSHIFT32( -tmp1, 32 - QA - rshifts );                                    // Q(16-rshifts)
                tmp2 = SKP_LSHIFT32( -tmp2, 32 - QA - rshifts );                                    // Q(16-rshifts)
                for( k = 0; k <= n; k++ ) {
                    CAf[ k ] = SKP_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ]                    );     // Q( -rshift )
                    CAb[ k ] = SKP_SMLAWB( CAb[ k ], tmp2, x_ptr[ subfr_length - n + k - 1 ] );     // Q( -rshift )
                }
            }
        } else {
            for( s = 0; s < nb_subfr; s++ ) {
                x_ptr = x + s * subfr_length;
                x1  = -SKP_LSHIFT32( (SKP_int32)x_ptr[ n ],                    -rshifts );          // Q( -rshifts )
                x2  = -SKP_LSHIFT32( (SKP_int32)x_ptr[ subfr_length - n - 1 ], -rshifts );          // Q( -rshifts )
                tmp1 = SKP_LSHIFT32( (SKP_int32)x_ptr[ n ],                    17 );                // Q17
                tmp2 = SKP_LSHIFT32( (SKP_int32)x_ptr[ subfr_length - n - 1 ], 17 );                // Q17
                for( k = 0; k < n; k++ ) {
                    C_first_row[ k ] = SKP_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ]            ); // Q( -rshifts )
                    C_last_row[ k ]  = SKP_MLA( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] ); // Q( -rshifts )
                    Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 17 );                                // Q17
                    tmp1 = SKP_MLA( tmp1, x_ptr[ n - k - 1 ],            Atmp1 );                   // Q17
                    tmp2 = SKP_MLA( tmp2, x_ptr[ subfr_length - n + k ], Atmp1 );                   // Q17
                }
                tmp1 = -tmp1;                                                                       // Q17
                tmp2 = -tmp2;                                                                       // Q17
                for( k = 0; k <= n; k++ ) {
                    CAf[ k ] = SKP_SMLAWW( CAf[ k ], tmp1, 
                        SKP_LSHIFT32( (SKP_int32)x_ptr[ n - k ], -rshifts - 1 ) );                  // Q( -rshift )
                    CAb[ k ] = SKP_SMLAWW( CAb[ k ], tmp2, 
                        SKP_LSHIFT32( (SKP_int32)x_ptr[ subfr_length - n + k - 1 ], -rshifts - 1 ) );// Q( -rshift )
                }
            }
        }

        /* Calculate numerator and denominator for the next order reflection (parcor) coefficient */
        tmp1 = C_first_row[ n ];                                                            // Q( -rshifts )
        tmp2 = C_last_row[ n ];                                                             // Q( -rshifts )
        num  = 0;                                                                           // Q( -rshifts )
        nrg  = SKP_ADD32( CAb[ 0 ], CAf[ 0 ] );                                             // Q( 1-rshifts )
        for( k = 0; k < n; k++ ) {
            Atmp_QA = Af_QA[ k ];
            lz = SKP_Silk_CLZ32( SKP_abs( Atmp_QA ) ) - 1;
            lz = SKP_min( 32 - QA, lz );
            Atmp1 = SKP_LSHIFT32( Atmp_QA, lz );                                            // Q( QA + lz )

            tmp1 = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( C_last_row[  n - k - 1 ], Atmp1 ), 32 - QA - lz );    // Q( -rshifts )
            tmp2 = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( C_first_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz );    // Q( -rshifts )
            num  = SKP_ADD_LSHIFT32( num,  SKP_SMMUL( CAb[ n - k ],             Atmp1 ), 32 - QA - lz );    // Q( -rshifts )
            nrg  = SKP_ADD_LSHIFT32( nrg,  SKP_SMMUL( SKP_ADD32( CAb[ k + 1 ], CAf[ k + 1 ] ), 
                                                                                Atmp1 ), 32 - QA - lz );    // Q( 1-rshifts )
        }
        CAf[ n + 1 ] = tmp1;                                                                // Q( -rshifts )
        CAb[ n + 1 ] = tmp2;                                                                // Q( -rshifts )
        num = SKP_ADD32( num, tmp2 );                                                       // Q( -rshifts )
        num = SKP_LSHIFT32( -num, 1 );                                                      // Q( 1-rshifts )

        /* Calculate the next order reflection (parcor) coefficient */
        if( SKP_abs( num ) < nrg ) {
            rc_Q31 = SKP_DIV32_varQ( num, nrg, 31 );
        } else {
            /* Negative energy or ratio too high; set remaining coefficients to zero and exit loop */
            SKP_memset( &Af_QA[ n ], 0, ( D - n ) * sizeof( SKP_int32 ) );
            SKP_assert( 0 );
            break;
        }

        /* Update the AR coefficients */
        for( k = 0; k < (n + 1) >> 1; k++ ) {
            tmp1 = Af_QA[ k ];                                                              // QA
            tmp2 = Af_QA[ n - k - 1 ];                                                      // QA
            Af_QA[ k ]         = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 );    // QA
            Af_QA[ n - k - 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 );    // QA
        }
        Af_QA[ n ] = SKP_RSHIFT32( rc_Q31, 31 - QA );                                       // QA

        /* Update C * Af and C * Ab */
        for( k = 0; k <= n + 1; k++ ) {
            tmp1 = CAf[ k ];                                                                // Q( -rshifts )
            tmp2 = CAb[ n - k + 1 ];                                                        // Q( -rshifts )
            CAf[ k ]         = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 );      // Q( -rshifts )
            CAb[ n - k + 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 );      // Q( -rshifts )
        }
    }

    /* Return residual energy */
    nrg  = CAf[ 0 ];                                                                        // Q( -rshifts )
    tmp1 = 1 << 16;                                                                         // Q16
    for( k = 0; k < D; k++ ) {
        Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 16 );                                    // Q16
        nrg  = SKP_SMLAWW( nrg, CAf[ k + 1 ], Atmp1 );                                      // Q( -rshifts )
        tmp1 = SKP_SMLAWW( tmp1, Atmp1, Atmp1 );                                            // Q16
        A_Q16[ k ] = -Atmp1;
    }
    *res_nrg = SKP_SMLAWW( nrg, SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ), -tmp1 );               // Q( -rshifts )
    *res_nrg_Q = -rshifts;
}
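For orientation, the textbook floating-point form of Burg's method is sketched below (illustrative only; burg_float and the size limits are assumptions). The fixed-point routine above follows the same order recursion, but it accumulates the correlations C*Af and C*Ab over stacked subframes, conditions C0 with WhiteNoiseFrac_Q32, and negates the coefficients before writing A_Q16, so the sign convention of its output is opposite to the error-filter coefficients a[] here.

#define BURG_MAX_N 1024     /* assumed maximum number of input samples */
#define BURG_MAX_D 32       /* assumed maximum prediction order */

/* Returns the residual energy; writes a[0..D-1], the coefficients of the
   prediction error filter e[n] = x[n] + a[0]*x[n-1] + ... + a[D-1]*x[n-D]. */
static double burg_float(double a[], const double x[], int N, int D)
{
    double f[BURG_MAX_N], b[BURG_MAX_N], a_prev[BURG_MAX_D];
    double E = 0.0;
    int    m, n, i;

    for (n = 0; n < N; n++) {
        f[n] = b[n] = x[n];                          /* forward / backward prediction errors */
        E += x[n] * x[n];
    }
    for (m = 0; m < D; m++) {
        double num = 0.0, den = 0.0, k;
        for (n = m + 1; n < N; n++) {                /* reflection (parcor) coefficient */
            num += f[n] * b[n - 1];
            den += f[n] * f[n] + b[n - 1] * b[n - 1];
        }
        k = (den > 0.0) ? -2.0 * num / den : 0.0;

        for (n = N - 1; n > m; n--) {                /* lattice update of the errors */
            double fn = f[n];
            f[n] = fn + k * b[n - 1];
            b[n] = b[n - 1] + k * fn;
        }
        for (i = 0; i < m; i++) {                    /* Levinson-style coefficient update */
            a_prev[i] = a[i];
        }
        for (i = 0; i < m; i++) {
            a[i] = a_prev[i] + k * a_prev[m - 1 - i];
        }
        a[m] = k;
        E *= (1.0 - k * k);                          /* residual energy after order m+1 */
    }
    return E;
}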
void SKP_Silk_noise_shape_analysis_FIX(
    SKP_Silk_encoder_state_FIX      *psEnc,         /* I/O  Encoder state FIX                           */
    SKP_Silk_encoder_control_FIX    *psEncCtrl,     /* I/O  Encoder control FIX                         */
    const SKP_int16                 *pitch_res,     /* I    LPC residual from pitch analysis            */
    const SKP_int16                 *x              /* I    Input signal [ 2 * frame_length + la_shape ]*/
)
{
    SKP_Silk_shape_state_FIX *psShapeSt = &psEnc->sShape;
    SKP_int     k, nSamples, lz, Qnrg, b_Q14, scale = 0, sz;
    SKP_int32   SNR_adj_dB_Q7, HarmBoost_Q16, HarmShapeGain_Q16, Tilt_Q16, tmp32;
    SKP_int32   nrg, pre_nrg_Q30, log_energy_Q7, log_energy_prev_Q7, energy_variation_Q7;
    SKP_int32   delta_Q16, BWExp1_Q16, BWExp2_Q16, gain_mult_Q16, gain_add_Q16, strength_Q16, b_Q8;
    SKP_int32   auto_corr[     SHAPE_LPC_ORDER_MAX + 1 ];
    SKP_int32   refl_coef_Q16[ SHAPE_LPC_ORDER_MAX ];
    SKP_int32   AR_Q24[        SHAPE_LPC_ORDER_MAX ];
    SKP_int16   x_windowed[    SHAPE_LPC_WIN_MAX ];
    const SKP_int16 *x_ptr, *pitch_res_ptr;

    SKP_int32   sqrt_nrg[ NB_SUBFR ], Qnrg_vec[ NB_SUBFR ];

    /* Point to start of first LPC analysis block */
    x_ptr = x + psEnc->sCmn.la_shape - SKP_SMULBB( SHAPE_LPC_WIN_MS, psEnc->sCmn.fs_kHz ) + psEnc->sCmn.frame_length / NB_SUBFR;

    /****************/
    /* CONTROL SNR  */
    /****************/
    /* Reduce SNR_dB values if recent bitstream has exceeded TargetRate */
    psEncCtrl->current_SNR_dB_Q7 = psEnc->SNR_dB_Q7 - SKP_SMULWB( SKP_LSHIFT( ( SKP_int32 )psEnc->BufferedInChannel_ms, 7 ), 3277 );

    /* Reduce SNR_dB if inband FEC used */
    if( psEnc->speech_activity_Q8 > LBRR_SPEECH_ACTIVITY_THRES_Q8 ) {
        psEncCtrl->current_SNR_dB_Q7 -= SKP_RSHIFT( psEnc->inBandFEC_SNR_comp_Q8, 1 );
    }

    /****************/
    /* GAIN CONTROL */
    /****************/
    /* Input quality is the average of the quality in the lowest two VAD bands */
    psEncCtrl->input_quality_Q14 = ( SKP_int )SKP_RSHIFT( ( SKP_int32 )psEncCtrl->input_quality_bands_Q15[ 0 ] 
        + psEncCtrl->input_quality_bands_Q15[ 1 ], 2 );
    /* Coding quality level, between 0.0_Q0 and 1.0_Q0, but in Q14 */
    psEncCtrl->coding_quality_Q14 = SKP_RSHIFT( SKP_Silk_sigm_Q15( SKP_RSHIFT_ROUND( psEncCtrl->current_SNR_dB_Q7 - ( 18 << 7 ), 4 ) ), 1 );

    /* Reduce coding SNR during low speech activity */
    b_Q8 = ( 1 << 8 ) - psEnc->speech_activity_Q8;
    b_Q8 = SKP_SMULWB( SKP_LSHIFT( b_Q8, 8 ), b_Q8 );
    SNR_adj_dB_Q7 = SKP_SMLAWB( psEncCtrl->current_SNR_dB_Q7,
        SKP_SMULBB( -BG_SNR_DECR_dB_Q7 >> ( 4 + 1 ), b_Q8 ),                                            // Q11
        SKP_SMULWB( ( 1 << 14 ) + psEncCtrl->input_quality_Q14, psEncCtrl->coding_quality_Q14 ) );      // Q12

    if( psEncCtrl->sCmn.sigtype == SIG_TYPE_VOICED ) {
        /* Reduce gains for periodic signals */
        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7, HARM_SNR_INCR_dB_Q7 << 1, psEnc->LTPCorr_Q15 );
    } else { 
        /* For unvoiced signals and low-quality input, adjust the quality slower than SNR_dB setting */
        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7, 
            SKP_SMLAWB( 6 << ( 7 + 2 ), -104856, psEncCtrl->current_SNR_dB_Q7 ),    //-104856_Q18 = -0.4_Q0, Q9
            ( 1 << 14 ) - psEncCtrl->input_quality_Q14 );                           // Q14
    }

    /*************************/
    /* SPARSENESS PROCESSING */
    /*************************/
    /* Set quantizer offset */
    if( psEncCtrl->sCmn.sigtype == SIG_TYPE_VOICED ) {
        /* Initially set to 0; may be overruled in process_gains(..) */
        psEncCtrl->sCmn.QuantOffsetType = 0;
        psEncCtrl->sparseness_Q8 = 0;
    } else {
        /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */
        nSamples = SKP_LSHIFT( psEnc->sCmn.fs_kHz, 1 );
        energy_variation_Q7 = 0;
        log_energy_prev_Q7  = 0;
        pitch_res_ptr = pitch_res;
        for( k = 0; k < FRAME_LENGTH_MS / 2; k++ ) {    
            SKP_Silk_sum_sqr_shift( &nrg, &scale, pitch_res_ptr, nSamples );
            nrg += SKP_RSHIFT( nSamples, scale );           // Q(-scale)
            
            log_energy_Q7 = SKP_Silk_lin2log( nrg );
            if( k > 0 ) {
                energy_variation_Q7 += SKP_abs( log_energy_Q7 - log_energy_prev_Q7 );
            }
            log_energy_prev_Q7 = log_energy_Q7;
            pitch_res_ptr += nSamples;
        }

        psEncCtrl->sparseness_Q8 = SKP_RSHIFT( SKP_Silk_sigm_Q15( SKP_SMULWB( energy_variation_Q7 - ( 5 << 7 ), 6554 ) ), 7 );    // 6554_Q16 = 0.1_Q0

        /* Set quantization offset depending on sparseness measure */
        if( psEncCtrl->sparseness_Q8 > SPARSENESS_THRESHOLD_QNT_OFFSET_Q8 ) {
            psEncCtrl->sCmn.QuantOffsetType = 0;
        } else {
            psEncCtrl->sCmn.QuantOffsetType = 1;
        }
        
        /* Increase coding SNR for sparse signals */
        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7, SPARSE_SNR_INCR_dB_Q7 << 8, psEncCtrl->sparseness_Q8 - ( 1 << 7 ) );
    }

    /*******************************/
    /* Control bandwidth expansion */
    /*******************************/
    delta_Q16  = SKP_SMULWB( ( 1 << 16 ) - SKP_SMULBB( 3, psEncCtrl->coding_quality_Q14 ), LOW_RATE_BANDWIDTH_EXPANSION_DELTA_Q16 );
    BWExp1_Q16 = BANDWIDTH_EXPANSION_Q16 - delta_Q16;
    BWExp2_Q16 = BANDWIDTH_EXPANSION_Q16 + delta_Q16;
    if( psEnc->sCmn.fs_kHz == 24 ) {
        /* Less bandwidth expansion for super wideband */
        BWExp1_Q16 = ( 1 << 16 ) - SKP_SMULWB( SWB_BANDWIDTH_EXPANSION_REDUCTION_Q16, ( 1 << 16 ) - BWExp1_Q16 );
        BWExp2_Q16 = ( 1 << 16 ) - SKP_SMULWB( SWB_BANDWIDTH_EXPANSION_REDUCTION_Q16, ( 1 << 16 ) - BWExp2_Q16 );
    }
    /* BWExp1 will be applied after BWExp2, so make it relative */
    BWExp1_Q16 = SKP_DIV32_16( SKP_LSHIFT( BWExp1_Q16, 14 ), SKP_RSHIFT( BWExp2_Q16, 2 ) );

    /********************************************/
    /* Compute noise shaping AR coefs and gains */
    /********************************************/
    sz = ( SKP_int )SKP_SMULBB( SHAPE_LPC_WIN_MS, psEnc->sCmn.fs_kHz );
    for( k = 0; k < NB_SUBFR; k++ ) {
        /* Apply window */
        SKP_Silk_apply_sine_window( x_windowed, x_ptr, 0, SHAPE_LPC_WIN_MS * psEnc->sCmn.fs_kHz );

        /* Update pointer: next LPC analysis block */
        x_ptr += psEnc->sCmn.frame_length / NB_SUBFR;

        /* Calculate auto correlation */
        SKP_Silk_autocorr( auto_corr, &scale, x_windowed, sz, psEnc->sCmn.shapingLPCOrder + 1 );

        /* Add white noise, as a fraction of energy */
        auto_corr[0] = SKP_ADD32( auto_corr[0], SKP_max_32( SKP_SMULWB( SKP_RSHIFT( auto_corr[ 0 ], 4 ), SHAPE_WHITE_NOISE_FRACTION_Q20 ), 1 ) ); 

        /* Calculate the reflection coefficients using schur */
        nrg = SKP_Silk_schur64( refl_coef_Q16, auto_corr, psEnc->sCmn.shapingLPCOrder );

        /* Convert reflection coefficients to prediction coefficients */
        SKP_Silk_k2a_Q16( AR_Q24, refl_coef_Q16, psEnc->sCmn.shapingLPCOrder );

        /* Bandwidth expansion for synthesis filter shaping */
        SKP_Silk_bwexpander_32( AR_Q24, psEnc->sCmn.shapingLPCOrder, BWExp2_Q16 );

        /* Make sure to fit in Q13 SKP_int16 */
        SKP_Silk_LPC_fit( &psEncCtrl->AR2_Q13[ k * SHAPE_LPC_ORDER_MAX ], AR_Q24, 13, psEnc->sCmn.shapingLPCOrder );

        /* Compute noise shaping filter coefficients */
        SKP_memcpy(
            &psEncCtrl->AR1_Q13[ k * SHAPE_LPC_ORDER_MAX ], 
            &psEncCtrl->AR2_Q13[ k * SHAPE_LPC_ORDER_MAX ], 
            psEnc->sCmn.shapingLPCOrder * sizeof( SKP_int16 ) );

        /* Bandwidth expansion for analysis filter shaping */
        SKP_assert( BWExp1_Q16 <= ( 1 << 16 ) ); // If ever breaking, use LPC_stabilize() in these cases to stay within range
        SKP_Silk_bwexpander( &psEncCtrl->AR1_Q13[ k * SHAPE_LPC_ORDER_MAX ], psEnc->sCmn.shapingLPCOrder, BWExp1_Q16 );

        /* Increase residual energy */
        nrg = SKP_SMLAWB( nrg, SKP_RSHIFT( auto_corr[ 0 ], 8 ), SHAPE_MIN_ENERGY_RATIO_Q24 );

        Qnrg = -scale;          // range: -12...30
        SKP_assert( Qnrg >= -12 );
        SKP_assert( Qnrg <=  30 );

        /* Make sure that Qnrg is an even number */
        if( Qnrg & 1 ) {
            Qnrg -= 1;
            nrg >>= 1;
        }

        tmp32 = SKP_Silk_SQRT_APPROX( nrg );
        Qnrg >>= 1;             // range: -6...15

        sqrt_nrg[ k ] = tmp32;
        Qnrg_vec[ k ] = Qnrg;

        psEncCtrl->Gains_Q16[ k ] = SKP_LSHIFT_SAT32( tmp32, 16 - Qnrg );
        /* Ratio of prediction gains, in energy domain */
        SKP_Silk_LPC_inverse_pred_gain_Q13( &pre_nrg_Q30, &psEncCtrl->AR2_Q13[ k * SHAPE_LPC_ORDER_MAX ], psEnc->sCmn.shapingLPCOrder );
        SKP_Silk_LPC_inverse_pred_gain_Q13( &nrg,         &psEncCtrl->AR1_Q13[ k * SHAPE_LPC_ORDER_MAX ], psEnc->sCmn.shapingLPCOrder );

        lz = SKP_min_32( SKP_Silk_CLZ32( pre_nrg_Q30 ) - 1, 19 );
        pre_nrg_Q30 = SKP_DIV32( SKP_LSHIFT( pre_nrg_Q30, lz ), SKP_RSHIFT( nrg, 20 - lz ) + 1 ); // Q20
        pre_nrg_Q30 = SKP_RSHIFT( SKP_LSHIFT_SAT32( pre_nrg_Q30, 9 ), 1 );  /* Q28 */
        psEncCtrl->GainsPre_Q14[ k ] = ( SKP_int )SKP_Silk_SQRT_APPROX( pre_nrg_Q30 );
    }

    /* ... remainder of SKP_Silk_noise_shape_analysis_FIX() omitted from this excerpt ... */
}
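The per-subframe gain in the loop above comes from an energy carrying a variable Q offset: Qnrg is made even so that the integer square root halves it exactly, and the result is then shifted up to Q16. A floating-point sketch of that conversion (energy_to_gain_q16 is a hypothetical helper; it omits the saturation performed by SKP_LSHIFT_SAT32):

#include <math.h>
#include <stdint.h>

static int32_t energy_to_gain_q16(int32_t nrg, int Qnrg)
{
    double gain;
    int    q;

    if (Qnrg & 1) {                 /* make Qnrg even so the square root halves it exactly */
        Qnrg -= 1;
        nrg >>= 1;
    }
    gain = sqrt((double)nrg);       /* gain now carries Q(Qnrg / 2) */
    q = Qnrg >> 1;
    return (int32_t)(gain * ldexp(1.0, 16 - q) + 0.5);  /* rescale to Q16 */
}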
void SKP_Silk_find_pred_coefs_FIX(SKP_Silk_encoder_state_FIX * psEnc,	/* I/O  encoder state                               */
				  SKP_Silk_encoder_control_FIX * psEncCtrl,	/* I/O  encoder control                             */
				  const int16_t res_pitch[]	/* I    Residual from pitch analysis                */
    )
{
	int i;
	int32_t WLTP[NB_SUBFR * LTP_ORDER * LTP_ORDER];
	int32_t invGains_Q16[NB_SUBFR], local_gains_Qx[NB_SUBFR],
	    Wght_Q15[NB_SUBFR];
	int NLSF_Q15[MAX_LPC_ORDER];
	const int16_t *x_ptr;
	int16_t *x_pre_ptr,
	    LPC_in_pre[NB_SUBFR * MAX_LPC_ORDER + MAX_FRAME_LENGTH];

	int32_t tmp, min_gain_Q16;
#if !VARQ
	int LZ;
#endif
	int LTP_corrs_rshift[NB_SUBFR];

	/* weighting for weighted least squares */
	min_gain_Q16 = int32_t_MAX >> 6;
	for (i = 0; i < NB_SUBFR; i++) {
		min_gain_Q16 = SKP_min(min_gain_Q16, psEncCtrl->Gains_Q16[i]);
	}
#if !VARQ
	LZ = SKP_Silk_CLZ32(min_gain_Q16) - 1;
	LZ = SKP_LIMIT(LZ, 0, 16);
	min_gain_Q16 = SKP_RSHIFT(min_gain_Q16, 2);	/* Ensure that maximum invGains_Q16 is within range of a 16 bit int */
#endif
	for (i = 0; i < NB_SUBFR; i++) {
		/* Divide to Q16 */
		assert(psEncCtrl->Gains_Q16[i] > 0);
#if VARQ
		/* Invert and normalize gains, and ensure that maximum invGains_Q16 is within range of a 16 bit int */
		invGains_Q16[i] =
		    SKP_DIV32_varQ(min_gain_Q16, psEncCtrl->Gains_Q16[i],
				   16 - 2);
#else
		invGains_Q16[i] =
		    SKP_DIV32(SKP_LSHIFT(min_gain_Q16, LZ),
			      SKP_RSHIFT(psEncCtrl->Gains_Q16[i], 16 - LZ));
#endif

		/* Ensure Wght_Q15 has a minimum value of 1 */
		invGains_Q16[i] = SKP_max(invGains_Q16[i], 363);

		/* Square the inverted gains */
		assert(invGains_Q16[i] == SKP_SAT16(invGains_Q16[i]));
		tmp = SKP_SMULWB(invGains_Q16[i], invGains_Q16[i]);
		Wght_Q15[i] = SKP_RSHIFT(tmp, 1);

		/* Invert the inverted and normalized gains */
		local_gains_Qx[i] =
		    SKP_DIV32((1 << (16 + Qx)), invGains_Q16[i]);
	}

	if (psEncCtrl->sCmn.sigtype == SIG_TYPE_VOICED) {
	/**********/
		/* VOICED */
	/**********/
		assert(psEnc->sCmn.frame_length -
			   psEnc->sCmn.predictLPCOrder >=
			   psEncCtrl->sCmn.pitchL[0] + LTP_ORDER / 2);

		/* LTP analysis */
		SKP_Silk_find_LTP_FIX(psEncCtrl->LTPCoef_Q14, WLTP,
				      &psEncCtrl->LTPredCodGain_Q7, res_pitch,
				      res_pitch +
				      SKP_RSHIFT(psEnc->sCmn.frame_length, 1),
				      psEncCtrl->sCmn.pitchL, Wght_Q15,
				      psEnc->sCmn.subfr_length,
				      psEnc->sCmn.frame_length,
				      LTP_corrs_rshift);

		/* Quantize LTP gain parameters */
		SKP_Silk_quant_LTP_gains_FIX(psEncCtrl->LTPCoef_Q14,
					     psEncCtrl->sCmn.LTPIndex,
					     &psEncCtrl->sCmn.PERIndex, WLTP,
					     psEnc->mu_LTP_Q8,
					     psEnc->sCmn.LTPQuantLowComplexity);

		/* Control LTP scaling */
		SKP_Silk_LTP_scale_ctrl_FIX(psEnc, psEncCtrl);

		/* Create LTP residual */
		SKP_Silk_LTP_analysis_filter_FIX(LPC_in_pre,
						 psEnc->x_buf +
						 psEnc->sCmn.frame_length -
						 psEnc->sCmn.predictLPCOrder,
						 psEncCtrl->LTPCoef_Q14,
						 psEncCtrl->sCmn.pitchL,
						 invGains_Q16, 16,
						 psEnc->sCmn.subfr_length,
						 psEnc->sCmn.predictLPCOrder);

	} else {
	/************/
		/* UNVOICED */
	/************/
		/* Create signal with prepended subframes, scaled by inverse gains */
		x_ptr =
		    psEnc->x_buf + psEnc->sCmn.frame_length -
		    psEnc->sCmn.predictLPCOrder;
		x_pre_ptr = LPC_in_pre;
		for (i = 0; i < NB_SUBFR; i++) {
			SKP_Silk_scale_copy_vector16(x_pre_ptr, x_ptr,
						     invGains_Q16[i],
						     psEnc->sCmn.subfr_length +
						     psEnc->sCmn.predictLPCOrder);
			x_pre_ptr +=
			    psEnc->sCmn.subfr_length +
			    psEnc->sCmn.predictLPCOrder;
			x_ptr += psEnc->sCmn.subfr_length;
		}

		SKP_memset(psEncCtrl->LTPCoef_Q14, 0,
			   NB_SUBFR * LTP_ORDER * sizeof(int16_t));
		psEncCtrl->LTPredCodGain_Q7 = 0;
	}

	/* LPC_in_pre contains the LTP-filtered input for voiced, and the unfiltered input for unvoiced */
	TIC(FIND_LPC)
	SKP_Silk_find_LPC_FIX(NLSF_Q15, &psEncCtrl->sCmn.NLSFInterpCoef_Q2,
			      psEnc->sPred.prev_NLSFq_Q15,
			      psEnc->sCmn.useInterpolatedNLSFs *
			      (1 - psEnc->sCmn.first_frame_after_reset),
			      psEnc->sCmn.predictLPCOrder, LPC_in_pre,
			      psEnc->sCmn.subfr_length +
			      psEnc->sCmn.predictLPCOrder);
	TOC(FIND_LPC)

	/* Quantize LSFs */
	TIC(PROCESS_LSFS)
	SKP_Silk_process_NLSFs_FIX(psEnc, psEncCtrl, NLSF_Q15);
	TOC(PROCESS_LSFS)

	/* Calculate residual energy using quantized LPC coefficients */
	SKP_Silk_residual_energy_FIX(psEncCtrl->ResNrg, psEncCtrl->ResNrgQ,
				     LPC_in_pre,
				     (const int16_t(*)[])psEncCtrl->PredCoef_Q12,
				     local_gains_Qx, Qx,
				     psEnc->sCmn.subfr_length,
				     psEnc->sCmn.predictLPCOrder);

	/* Copy to prediction struct for use in next frame for fluctuation reduction */
	SKP_memcpy(psEnc->sPred.prev_NLSFq_Q15, NLSF_Q15,
		   psEnc->sCmn.predictLPCOrder * sizeof(int));

}
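A rough floating-point picture of the weighting set up at the top of this function (illustrative only; make_weights and N_SUBFR are assumptions, and the Q14/Q16 headroom scaling and the 363 lower clamp are omitted): each subframe gain is inverted and normalized by the smallest gain, and the squared inverse gains become the weights for the weighted-least-squares LTP/LPC fits.

#define N_SUBFR 4   /* assumed, cf. NB_SUBFR */

static void make_weights(const double gains[N_SUBFR],
                         double inv_gains[N_SUBFR], double wght[N_SUBFR])
{
    double min_gain = gains[0];
    int    i;

    for (i = 1; i < N_SUBFR; i++) {              /* smallest subframe gain, assumed > 0 */
        if (gains[i] < min_gain) {
            min_gain = gains[i];
        }
    }
    for (i = 0; i < N_SUBFR; i++) {
        inv_gains[i] = min_gain / gains[i];          /* inverted, normalized gains (<= 1) */
        wght[i]      = inv_gains[i] * inv_gains[i];  /* squared for least-squares weighting */
    }
}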