Example #1
void synthesis_sub_band_down_sampled(Int32 Sr[], Int32 Si[], Int16 data[])
{

    Int16 k;
    Int16 *pt_data_1;
    Int32 exp_m0_25;
    const Int32 *pt_exp = exp_m0_25_phi;

    Int32 *XX = Sr;
    Int32 *YY = (Int32 *)data;
    Int32 tmp1;
    Int32 tmp2;

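    /*
     *  Pre-rotation: multiply each complex sub-band sample (Sr[k], Si[k]) by
     *  the twiddle factor from exp_m0_25_phi[].  XX aliases Sr, so the result
     *  is written in place, while YY (aliasing data[]) is filled in reverse
     *  order.
     */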
    for (k = 0; k < 32; k++)
    {
        exp_m0_25 = *(pt_exp++);
        tmp1 = Sr[k];
        tmp2 = Si[k];
        XX[k]    = cmplx_mul32_by_16(-tmp1,  tmp2, exp_m0_25);
        YY[31-k] = cmplx_mul32_by_16(tmp2,  tmp1, exp_m0_25);
    }

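    /*  32-point MDCTs on the two pre-rotated sequences  */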
    mdct_32(XX);
    mdct_32(YY);

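    /*
     *  Save the second MDCT output in Si[] before data[] (which YY aliases)
     *  is reused as the 16-bit output buffer.
     */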
    for (k = 0; k < 32; k++)
    {
        Si[k] = YY[k];
    }

    pt_data_1 = data;

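    /*
     *  Combine both MDCT outputs, scale down by >> 14, and write the 16-bit
     *  output: first half in natural order, second half reversed and negated.
     */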
    for (k = 0; k < 16; k++)
    {
        *(pt_data_1++)  = (Int16)((XX[2*k  ] + Si[2*k  ]) >> 14);
        *(pt_data_1++)  = (Int16)((XX[2*k+1] - Si[2*k+1]) >> 14);
    }

    for (k = 15; k > -1; k--)
    {
        *(pt_data_1++)  = (Int16)((-(XX[2*k+1] + Si[2*k+1])) >> 14);
        *(pt_data_1++)  = (Int16)((-(XX[2*k  ] - Si[2*k  ])) >> 14);
    }

}
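All of the routines in this example lean on cmplx_mul32_by_16(), whose definition is not part of this listing. The sketch below is only a guess at the general pattern: it assumes the twiddle word packs a Q15 cosine in the upper 16 bits and a Q15 sine in the lower 16 bits, and that the helper returns x*cos - y*sin scaled back to 32 bits. The helper name, packing order, and scaling are assumptions for illustration, not the project's actual fixed-point library. Under that assumption, paired calls such as (temp_re, temp_im) and (temp_im, -temp_re) in fwd_long_complex_rot below amount to one full complex multiplication by the twiddle factor.

#include <stdint.h>

/*
 * Hypothetical reference for the cmplx_mul32_by_16() call pattern:
 * return x*cos(w) - y*sin(w), with the 16-bit cosine/sine packed into a
 * single 32-bit twiddle word.  Packing order and Q15 scaling are assumed
 * here and may differ from the real fixed-point library.
 */
static int32_t cmplx_rot_ref(int32_t x, int32_t y, int32_t exp_jw)
{
    int16_t c = (int16_t)(exp_jw >> 16);        /* assumed: cos in the upper half (Q15) */
    int16_t s = (int16_t)(exp_jw & 0xFFFF);     /* assumed: sin in the lower half (Q15) */

    /* keep the 32x16 products in 64 bits, then scale back to a 32-bit result */
    int64_t acc = (int64_t)x * c - (int64_t)y * s;
    return (int32_t)(acc >> 15);
}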
Int fwd_long_complex_rot(
    Int32 *Data_in,
    Int32 *Data_out,
    Int32  max)
{
    Int     i;
    const   Int32 *p_rotate;
    Int32   temp_re;
    Int32   temp_im;
    Int32   *pData_in_ref1;
    Int32   *pData_in_ref2;
    Int32   exp_jw;
    Int32   temp_re_32;
    Int32   temp_im_32;

    Int32   *pData_out_1;
    Int32   *pData_out_2;
    Int32   *pData_out_3;
    Int32   *pData_out_4;

    Int32 *pData_in_1;
    Int32 *pData_in_2;

    Int     exp;

    p_rotate       =  exp_rotation_N_2048;

    pData_in_ref1  =  Data_in;
    pData_in_ref2  = &Data_in[TWICE_FWD_LONG_CX_ROT_LENGTH];

    pData_out_1 = Data_out;
    pData_out_2 = &Data_out[LONG_WINDOW_LENGTH_m_1];
    pData_out_3 = &Data_out[LONG_WINDOW_LENGTH];
    pData_out_4 = &Data_out[TWICE_LONG_WINDOW_LENGTH_m_1];

    /*
     *  Data_out
     *                                   >>>>                   <<<<
     *                                pData_out_3             pData_out_4
     *      |             |             |             |             |
     * pData_out_1               pData_out_2
     *      >>>>                     <<<<
     */


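    /*
     *  Derive the right-shift that leaves roughly 16 significant bits in the
     *  input samples (see the normalization sketch after this function).
     */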
    exp = 16 - pv_normalize(max);

    if (exp < 0)
    {
        exp = 0;
    }

    /*
     *  Apply  A/2^(diff) + B
     */


    pData_in_1 = pData_in_ref1;
    pData_in_2 = pData_in_ref2;

    for (i = FWD_LONG_CX_ROT_LENGTH; i != 0; i--)
    {

        /*
         * cos_n + j*sin_n == exp(j(2pi/N)(k+1/8))
         */

        exp_jw = *p_rotate++;

        /*
         *  Use auxiliary variables to avoid double accesses to memory.
         *  The input data is scaled so that only the lower 16 bits are used.
         */

        temp_re =  *(pData_in_1++) >> exp;
        temp_im =  *(pData_in_1++) >> exp;

        /*
         *   Pre-rotation
         */

        temp_re_32  = (cmplx_mul32_by_16(temp_re,   temp_im,  exp_jw));
        temp_im_32  = (cmplx_mul32_by_16(temp_im,  -temp_re,  exp_jw));

        *(pData_out_1++) = - temp_re_32;
        *(pData_out_2--) =   temp_im_32;
        *(pData_out_3++) = - temp_im_32;
        *(pData_out_4--) =   temp_re_32;

        /*
         *   Increment the pointers to jump over the imaginary parts
         *   (outputs 1 & 4) or the real parts (outputs 2 & 3)
         */
        pData_out_1++;
        pData_out_2--;
        pData_out_3++;
        pData_out_4--;

        /*
         *   Repeat the procedure for the odd indices at the output
         */

        exp_jw = *p_rotate++;

        temp_re =  *(pData_in_2++) >> exp;
        temp_im =  *(pData_in_2++) >> exp;

        temp_re_32  = (cmplx_mul32_by_16(temp_re,   temp_im,  exp_jw));
        temp_im_32  = (cmplx_mul32_by_16(temp_im,  -temp_re,  exp_jw));

        *(pData_out_1++) = - temp_re_32;
        *(pData_out_2--) =   temp_im_32;
        *(pData_out_3++) = - temp_im_32;
        *(pData_out_4--) =   temp_re_32;

        pData_out_1++;
        pData_out_2--;
        pData_out_3++;
        pData_out_4--;

    }

    return (exp + 1);
}
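The shift "exp = 16 - pv_normalize(max)" decides how far the input is scaled down before the pre-rotation so that, per the comment in the loop, only the lower 16 bits carry significant data. pv_normalize() is not shown in this listing; the sketch below assumes it returns the number of redundant sign bits of its argument (how far the value could be shifted left without overflowing), which is the usual contract for this kind of helper. The name norm_bits_ref and the conventions chosen for 0 and -1 are illustrative only.

#include <stdint.h>

/*
 * Assumed behaviour of pv_normalize(): count the redundant sign bits of a
 * 32-bit value, i.e. how many positions it can be shifted left before the
 * sign bit would change.  With this, "16 - norm" is the right-shift needed
 * to fit the largest sample into roughly 16 bits (clamped at 0 when the
 * data is already small enough), matching the use in fwd_long_complex_rot.
 */
static int norm_bits_ref(int32_t x)
{
    int n = 0;

    if (x < 0)
    {
        x = ~x;                     /* fold negative values onto non-negative ones */
    }
    if (x == 0)
    {
        return 31;                  /* 0 and -1 can be shifted arbitrarily far */
    }
    while (x < 0x40000000)          /* shift until bit 30 is set */
    {
        x <<= 1;
        n++;
    }
    return n;
}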
void ps_all_pass_fract_delay_filter_type_II(UInt32 *delayBufIndex,
        Int32 sb_delay,
        const Int32 *ppFractDelayPhaseFactorSer,
        Int32 ***pppRealDelayRBufferSer,
        Int32 ***pppImagDelayRBufferSer,
        Int32 *rIn,
        Int32 *iIn,
        Int32 decayScaleFactor)
{

    Int32 cmplx;
    Int16 rTmp0;
    Int32 rTmp;
    Int32 iTmp;
    Int32 *pt_rTmp;
    Int32 *pt_iTmp;
    const Int16 *pt_delay;



    /*
     *  All pass filters
     *                         2
     *                        ___  Q_fract(k,m)*z^(-d(m))  -  a(m)*g_decay_slope(k)
     *   z^(-2)*phi_fract(k)* | |  ------------------------------------------------
     *                        m=0  1  - a(m)*g_decay_slope(k)*Q_fract(k,m)*z^(-d(m))
     *
     *
     *    Fractional delay matrix:
     *
     *    Q_fract(k,m) = exp(-j*pi*q(m)*f_center(k))       0 <= k <= SUBQMF_GROUPS
     *
     *    Vectors: a(m), q(m), d(m) are constants
     *
     *    m                              m     0       1       2
     *                                 -------------------------------
     *    delay length                 d(m) == 3       4       5      (Fs > 32 kHz)
     *    fractional delay length      q(m) == 0.43    0.75    0.347
     *    filter coefficient           a(m) == 0.65144 0.56472 0.48954
     *
     *             g_decay_slope(k) is given
     */


    Int32 tmp_r;
    Int32 tmp_i;

    pt_rTmp = &pppRealDelayRBufferSer[0][*(delayBufIndex)][sb_delay];
    pt_iTmp = &pppImagDelayRBufferSer[0][*(delayBufIndex++)][sb_delay];

    cmplx  = *(ppFractDelayPhaseFactorSer++);        /* Q_fract(k,m)  */
    pt_delay = aRevLinkDecaySerCoeff[decayScaleFactor];
    tmp_r = *pt_rTmp << 1;
    tmp_i = *pt_iTmp << 1;

    rTmp = cmplx_mul32_by_16(tmp_r, -tmp_i,  cmplx);
    rTmp0  = *(pt_delay++);
    iTmp = cmplx_mul32_by_16(tmp_i,  tmp_r,  cmplx);


    iTmp     =  fxp_mac32_by_16(-*iIn << 1, rTmp0, iTmp);  /* Q_fract(k,m)*y(n-1) - a(m)*g_decay_slope(k)*x(n) */
    *pt_iTmp =  fxp_mac32_by_16(iTmp << 1, rTmp0, *iIn);   /* y(n) = x(n) + a(m)*g_decay_slope(k)*( Q_fract(k,m)*y(n-1) - a(m)*g_decay_slope(k)*x(n)) */
    *iIn = iTmp;

    rTmp     =  fxp_mac32_by_16(-*rIn << 1, rTmp0, rTmp);
    *pt_rTmp =  fxp_mac32_by_16(rTmp << 1, rTmp0, *rIn);
    *rIn = rTmp;

    pt_rTmp = &pppRealDelayRBufferSer[1][*(delayBufIndex)][sb_delay];
    pt_iTmp = &pppImagDelayRBufferSer[1][*(delayBufIndex++)][sb_delay];


    cmplx  = *(ppFractDelayPhaseFactorSer++);        /* Q_fract(k,m)  */
    tmp_r = *pt_rTmp << 1;
    tmp_i = *pt_iTmp << 1;

    rTmp = cmplx_mul32_by_16(tmp_r, -tmp_i,  cmplx);
    rTmp0  = *(pt_delay++);
    iTmp = cmplx_mul32_by_16(tmp_i,  tmp_r,  cmplx);
    iTmp     =  fxp_mac32_by_16(-*iIn << 1, rTmp0, iTmp);  /* Q_fract(k,m)*y(n-1) - a(m)*g_decay_slope(k)*x(n) */
    *pt_iTmp =  fxp_mac32_by_16(iTmp << 1, rTmp0, *iIn);   /* y(n) = x(n) + a(m)*g_decay_slope(k)*( Q_fract(k,m)*y(n-1) - a(m)*g_decay_slope(k)*x(n)) */
    *iIn = iTmp;

    rTmp     =  fxp_mac32_by_16(-*rIn << 1, rTmp0, rTmp);
    *pt_rTmp =  fxp_mac32_by_16(rTmp << 1, rTmp0, *rIn);
    *rIn = rTmp;

    pt_rTmp = &pppRealDelayRBufferSer[2][*(delayBufIndex)][sb_delay];
    pt_iTmp = &pppImagDelayRBufferSer[2][*(delayBufIndex)][sb_delay];


    cmplx  = *(ppFractDelayPhaseFactorSer);        /* Q_fract(k,m)  */
    tmp_r = *pt_rTmp << 1;
    tmp_i = *pt_iTmp << 1;

    rTmp = cmplx_mul32_by_16(tmp_r, -tmp_i,  cmplx);
    rTmp0  = *(pt_delay);
    iTmp = cmplx_mul32_by_16(tmp_i,  tmp_r,  cmplx);


    iTmp     =  fxp_mac32_by_16(-*iIn, rTmp0, iTmp);    /* Q_fract(k,m)*y(n-1) - a(m)*g_decay_slope(k)*x(n) */
    *pt_iTmp =  fxp_mac32_by_16(iTmp, rTmp0, *iIn);     /* y(n) = x(n) + a(m)*g_decay_slope(k)*( Q_fract(k,m)*y(n-1) - a(m)*g_decay_slope(k)*x(n)) */
    *iIn = iTmp << 2;

    rTmp     =  fxp_mac32_by_16(-*rIn, rTmp0, rTmp);
    *pt_rTmp =  fxp_mac32_by_16(rTmp, rTmp0, *rIn);
    *rIn = rTmp << 2;

}
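For the all-pass network described in the comment block of the function above, a floating-point reference of a single stage may make the fixed-point arithmetic easier to follow. It uses the same "compute the output, then update the delay element" ordering as the code: out = Q*delayed - a*g*in, then new delayed value = in + a*g*out, which realizes (Q*z^(-d) - a*g) / (1 - a*g*Q*z^(-d)). The function name and the single-element state (standing in for the d(m)-deep ring buffer selected by delayBufIndex) are illustrative only, and the Q-format shifts of the fixed-point version are omitted.

#include <complex.h>

/*
 * One all-pass stage in floating point (illustrative only):
 *
 *              Q*z^(-d) - a*g
 *   H(z) =  ---------------------- ,   ag = a(m)*g_decay_slope(k),  q = Q_fract(k,m)
 *            1 - a*g*Q*z^(-d)
 *
 * "state" stands in for the value read d(m) samples back from the ring buffer.
 */
static double complex all_pass_stage_ref(double complex x,      /* stage input            */
                                         double complex q,      /* fractional-delay phase */
                                         double         ag,     /* a(m)*g_decay_slope(k)  */
                                         double complex *state) /* delayed feedback value */
{
    double complex y = q * (*state) - ag * x;   /* Q*w(n-d) - a*g*x(n)                 */

    *state = x + ag * y;                        /* next delayed value: x(n) + a*g*y(n) */

    return y;                                   /* fed to the next stage, like *rIn / *iIn */
}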