Example #1
0
// Element-wise subtraction of two 2-lane u32 vectors; thin wrapper over the
// NEON intrinsic vsub_u32 so generic code can call an overload named vsub().
inline uint32x2_t vsub(const uint32x2_t &a, const uint32x2_t &b)
{
    return vsub_u32(a, b);
}
/*
 * Variance of a 16x16 block against a half-pixel (horizontal AND vertical)
 * interpolated prediction, NEON implementation.
 *
 * The half-pel prediction is built with the bilinear (1,1)/2 filter in both
 * directions: each source row is first averaged with the row shifted right by
 * one pixel (horizontal half-pel, vext + vrhadd), then consecutive filtered
 * rows are averaged pairwise (vertical half-pel, vrhadd again).
 *
 * Parameters:
 *   src_ptr       - source (predictor input) pixels
 *   source_stride - stride of src_ptr in bytes
 *   ref_ptr       - reference pixels to diff against
 *   recon_stride  - stride of ref_ptr in bytes
 *   sse           - out: sum of squared differences over the 256 pixels
 *
 * Returns: variance = sse - (sum*sum)/256, where sum is the (signed) sum of
 * the pixel differences.
 */
unsigned int vp8_variance_halfpixvar16x16_hv_neon(
        const unsigned char *src_ptr,
        int  source_stride,
        const unsigned char *ref_ptr,
        int  recon_stride,
        unsigned int *sse) {
    int i;
    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
    int16x4_t d0s16, d1s16, d2s16, d3s16, d10s16, d11s16, d12s16, d13s16;
    int16x4_t d18s16, d19s16, d20s16, d21s16, d22s16, d23s16, d24s16, d25s16;
    uint32x2_t d0u32, d10u32;
    int64x1_t d0s64, d1s64, d2s64, d3s64;
    uint8x16_t q0u8, q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8;
    uint16x8_t q0u16, q1u16, q5u16, q6u16, q9u16, q10u16, q11u16, q12u16;
    int32x4_t q13s32, q14s32, q15s32;
    int64x2_t q0s64, q1s64, q5s64;

    // q13 accumulates the sum of differences; q14/q15 the sum of squares.
    q13s32 = vdupq_n_s32(0);
    q14s32 = vdupq_n_s32(0);
    q15s32 = vdupq_n_s32(0);

    // Prime the vertical filter: horizontally half-pel filter row 0
    // (average row with itself shifted by one pixel).
    q0u8 = vld1q_u8(src_ptr);
    q1u8 = vld1q_u8(src_ptr + 16);
    src_ptr += source_stride;
    q1u8 = vextq_u8(q0u8, q1u8, 1);
    q0u8 = vrhaddq_u8(q0u8, q1u8);
    // Process 4 rows per iteration; 4 iterations cover the 16 rows.
    for (i = 0; i < 4; i++) {  // vp8_filt_fpo16x16s_4_0_loop_neon
        // Load 4 source rows (each row needs 17 pixels, hence two loads).
        q2u8 = vld1q_u8(src_ptr);
        q3u8 = vld1q_u8(src_ptr + 16);
        src_ptr += source_stride;
        q4u8 = vld1q_u8(src_ptr);
        q5u8 = vld1q_u8(src_ptr + 16);
        src_ptr += source_stride;
        q6u8 = vld1q_u8(src_ptr);
        q7u8 = vld1q_u8(src_ptr + 16);
        src_ptr += source_stride;
        q8u8 = vld1q_u8(src_ptr);
        q9u8 = vld1q_u8(src_ptr + 16);
        src_ptr += source_stride;

        // Horizontal half-pel: build each row shifted right by one pixel.
        q3u8 = vextq_u8(q2u8, q3u8, 1);
        q5u8 = vextq_u8(q4u8, q5u8, 1);
        q7u8 = vextq_u8(q6u8, q7u8, 1);
        q9u8 = vextq_u8(q8u8, q9u8, 1);

        // Rounding average of each row with its shifted copy.
        q1u8 = vrhaddq_u8(q2u8, q3u8);
        q2u8 = vrhaddq_u8(q4u8, q5u8);
        q3u8 = vrhaddq_u8(q6u8, q7u8);
        q4u8 = vrhaddq_u8(q8u8, q9u8);
        // Vertical half-pel: average each h-filtered row with the next one
        // (q0u8 carries the previous iteration's last filtered row).
        q0u8 = vrhaddq_u8(q0u8, q1u8);
        q1u8 = vrhaddq_u8(q1u8, q2u8);
        q2u8 = vrhaddq_u8(q2u8, q3u8);
        q3u8 = vrhaddq_u8(q3u8, q4u8);

        // Load the 4 matching reference rows.
        q5u8 = vld1q_u8(ref_ptr);
        ref_ptr += recon_stride;
        q6u8 = vld1q_u8(ref_ptr);
        ref_ptr += recon_stride;
        q7u8 = vld1q_u8(ref_ptr);
        ref_ptr += recon_stride;
        q8u8 = vld1q_u8(ref_ptr);
        ref_ptr += recon_stride;

        // Split each 16-byte predicted row into 8-byte halves for vsubl.
        d0u8 = vget_low_u8(q0u8);
        d1u8 = vget_high_u8(q0u8);
        d2u8 = vget_low_u8(q1u8);
        d3u8 = vget_high_u8(q1u8);
        d4u8 = vget_low_u8(q2u8);
        d5u8 = vget_high_u8(q2u8);
        d6u8 = vget_low_u8(q3u8);
        d7u8 = vget_high_u8(q3u8);

        // Widening differences: prediction - reference, 16-bit lanes.
        q9u16  = vsubl_u8(d0u8, vget_low_u8(q5u8));
        q10u16 = vsubl_u8(d1u8, vget_high_u8(q5u8));
        q11u16 = vsubl_u8(d2u8, vget_low_u8(q6u8));
        q12u16 = vsubl_u8(d3u8, vget_high_u8(q6u8));
        q0u16  = vsubl_u8(d4u8, vget_low_u8(q7u8));
        q1u16  = vsubl_u8(d5u8, vget_high_u8(q7u8));
        q5u16  = vsubl_u8(d6u8, vget_low_u8(q8u8));
        q6u16  = vsubl_u8(d7u8, vget_high_u8(q8u8));

        // For each 8-lane difference vector: pairwise-accumulate the signed
        // sum into q13, and multiply-accumulate the squares into q14/q15.
        // (The u16 differences are reinterpreted as s16: negative diffs
        // wrapped in the u16 subtraction, so the s16 view is the true diff.)
        d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
        d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q9u16));
        q14s32 = vmlal_s16(q14s32, d18s16, d18s16);
        q15s32 = vmlal_s16(q15s32, d19s16, d19s16);

        d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
        d21s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));
        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q10u16));
        q14s32 = vmlal_s16(q14s32, d20s16, d20s16);
        q15s32 = vmlal_s16(q15s32, d21s16, d21s16);

        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q11u16));
        q14s32 = vmlal_s16(q14s32, d22s16, d22s16);
        q15s32 = vmlal_s16(q15s32, d23s16, d23s16);

        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q12u16));
        q14s32 = vmlal_s16(q14s32, d24s16, d24s16);
        q15s32 = vmlal_s16(q15s32, d25s16, d25s16);

        d0s16 = vreinterpret_s16_u16(vget_low_u16(q0u16));
        d1s16 = vreinterpret_s16_u16(vget_high_u16(q0u16));
        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q0u16));
        q14s32 = vmlal_s16(q14s32, d0s16, d0s16);
        q15s32 = vmlal_s16(q15s32, d1s16, d1s16);

        d2s16 = vreinterpret_s16_u16(vget_low_u16(q1u16));
        d3s16 = vreinterpret_s16_u16(vget_high_u16(q1u16));
        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q1u16));
        q14s32 = vmlal_s16(q14s32, d2s16, d2s16);
        q15s32 = vmlal_s16(q15s32, d3s16, d3s16);

        d10s16 = vreinterpret_s16_u16(vget_low_u16(q5u16));
        d11s16 = vreinterpret_s16_u16(vget_high_u16(q5u16));
        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q5u16));
        q14s32 = vmlal_s16(q14s32, d10s16, d10s16);
        q15s32 = vmlal_s16(q15s32, d11s16, d11s16);

        d12s16 = vreinterpret_s16_u16(vget_low_u16(q6u16));
        d13s16 = vreinterpret_s16_u16(vget_high_u16(q6u16));
        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q6u16));
        q14s32 = vmlal_s16(q14s32, d12s16, d12s16);
        q15s32 = vmlal_s16(q15s32, d13s16, d13s16);

        // Carry the last horizontally-filtered row into the next iteration
        // for the vertical average.
        q0u8 = q4u8;
    }

    // Horizontal reduction: fold the four 32-bit accumulator lanes down to
    // scalar sum (d0s64) and scalar sse (d1s64).
    q15s32 = vaddq_s32(q14s32, q15s32);
    q0s64 = vpaddlq_s32(q13s32);
    q1s64 = vpaddlq_s32(q15s32);

    d0s64 = vget_low_s64(q0s64);
    d1s64 = vget_high_s64(q0s64);
    d2s64 = vget_low_s64(q1s64);
    d3s64 = vget_high_s64(q1s64);
    d0s64 = vadd_s64(d0s64, d1s64);   // total signed sum of differences
    d1s64 = vadd_s64(d2s64, d3s64);   // total sum of squared differences

    // sum*sum (the 64-bit sum fits in its low 32-bit lane for a 16x16 block).
    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
                      vreinterpret_s32_s64(d0s64));
    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);

    // variance = sse - (sum*sum) / 256   (256 pixels => shift right by 8)
    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 8);
    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);

    return vget_lane_u32(d0u32, 0);
}
void mdrc5b_apply_limiter(MDRC5B_LOCAL_STRUCT_T *HeapPtr)
{
    unsigned int LaIdx;
    unsigned int NumMainCh;
    unsigned int Samples;
    unsigned int ch, k, n;
    MMlong       *Ptr;
    MMlong       *Ptr2;

    MMlong       *MemOutPtr;
    MMshort      PeakdB;
    MMlong       PeakMax;
    int          RmsMeasure;
    MMshort      LimiterAtCoef;
    MMshort      LimiterReCoef;
    MMshort      LimiterGainMant[MDRC5B_BLOCK_SIZE + 1];
    MMshort      LimiterGainExp;
    MMshort      LimiterTargetGaindB;
    unsigned int LimiterHoldRem;
    unsigned int LimiterHtSamp;
    MMshort      Exp, TargetGain;
    MMshort      MaxShiftBits;
    unsigned int lookahead_len = (unsigned int) HeapPtr->LimiterLALen;
    unsigned int cpt1, cpt2;
    uint32x2x2_t Temp_u32x2x2;
    uint32x2_t   Ldbits_u32x2, Ldbits2_u32x2;
    uint32x2_t   bsl_u32x2;
    int32x2_t    LimGainMant_32x2;
    int64x2_t    TempX_64x2, MemOut_64x2;
    int64x2_t    Tmp_64x2;
    int64x2_t    LimiterGainExp_64x2, sample_64x2;
    int64x1_t    TempX_64x1, sample_64x1;
    int32_t      *LimiterGainMant_ptr;
    int32x2_t    Tmp_32x2, Ldbits_32x2, n_32x2;
    int32x2_t    TempX_low_32x2, TempX_high_32x2;
    int32x2x2_t  Tmp_32x2x2;
    int64x1_t    Peak_64x1, PeakMax_64x1, Tmp_64x1, diffX_64x1;
    int64x1_t    Peak_scale_pow_64x1, Peak_scale_64x1, Zero_s64x1;
    int64x1_t    MaxShiftBits_neg_64x1, MaxShiftBits_hd_64x1;
    int64x2_t    diffX_64x2;
    uint64x1_t   bsl_u64x1;
    int32x2_t    LimiterPeakCoef_32x2, diffX_low_32x2, diffX_high_32x2;
    int32x2_t    TargetGain_32x2;
    uint32x2x2_t Peak_u32x2x2;
    uint32x2_t   Peak_exp_u32x2, Peak_exp2_u32x2, Peak_mant_u32x2;
    int32x2_t    x_32x2, xn_32x2, PeakdB_32x2, Peak_exp_32x2;
    int32x2_t    LimiterTargetGaindB_32x2, Exp_32x2, LimiterCoef_32x2;
    int32x4_t    Tmp_32x4;


    START_PMU_MEASURE(PMU_MEASURE_MRDC5B_APPLY_LIMITER)

    START_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT)

    Samples   = (unsigned int) HeapPtr->BlockSize;
    NumMainCh = (unsigned int) HeapPtr->NumMainCh;

    TempX_64x2 = vdupq_n_s64(0);
    for(ch = 0; ch < NumMainCh; ch++)
    {
        Ptr = HeapPtr->MainInBuf[ch];
        // compute the number of bits needs to be shifted to avoid overflow
        for(k = (Samples >> 1); k > 0; k--)
        {
            sample_64x2 = vld1q_s64(Ptr);
            Ptr        +=2;
            sample_64x2 = veorq_s64(sample_64x2, vshrq_n_s64(sample_64x2, 63));
            TempX_64x2  = vorrq_s64(TempX_64x2, sample_64x2);
        }
        if(Samples & 1)
        {
            sample_64x1 = vld1_s64(Ptr);
            sample_64x1 = veor_s64(sample_64x1, vshr_n_s64(sample_64x1, 63));
            TempX_64x2  = vorrq_s64(TempX_64x2, vcombine_s64(sample_64x1, sample_64x1));
        }
    }
    TempX_64x1    = vorr_s64(vget_low_s64(TempX_64x2), vget_high_s64(TempX_64x2));
    Temp_u32x2x2  = vuzp_u32(vreinterpret_u32_s64(TempX_64x1), vreinterpret_u32_s64(TempX_64x1));
    bsl_u32x2     = vceq_u32(Temp_u32x2x2.val[1], vdup_n_u32(0));                  // MSB == 0 ?
    // use clz instead of cls because we are sure that input value is positive
    // and because cls(LSB) could be wrong (if MSB is equal to 0 and bit 31 of LSL is 1)
    // thus clz result will be 1 more than cls result (that's why you may see (Ldbits - 1)
    // instead of Ldbits below)
    Ldbits_u32x2  = vadd_u32(vclz_u32(Temp_u32x2x2.val[0]), vdup_n_u32(32));       // clz(LSB)+32
    Ldbits2_u32x2 = vclz_u32(Temp_u32x2x2.val[1]);                                 // clz(MSB)
    Ldbits_u32x2  = vbsl_u32(bsl_u32x2, Ldbits_u32x2, Ldbits2_u32x2);              // MSB == 0 ? clz(LSB)+32 : clz(MSB)
    bsl_u32x2     = vceq_u32(Ldbits_u32x2, vdup_n_u32(64));                        // Ldbits == 64 ? (i.e. TempX == 0 ?)
    // the aim of MaxShiftBits is that sample will be shifted so that it occupies
    // 24 significant bits for 24 bits samples or 32 significant bits for 32 bits samples
    // but we are in 64 bits architecture on CA9/NEON
    // so we must right shift of ((64 - 24) - (Ldbits - 1)) bits for 24 bits samples
    // or of ((64 - 32) - (Ldbits - 1)) bits for 32 bits samples
    // and we add 1 because it was done this way on MMDSP (I don't know why !)
#ifdef SAMPLES_24_BITS
    // MaxShiftBits = ((64 - 24) - (Ldbits - 1)) + 1
    //              = 42 - Ldbits
    Ldbits_32x2     = vsub_s32(vdup_n_s32(42), vreinterpret_s32_u32(Ldbits_u32x2));
#else // SAMPLES_24_BITS
    // MaxShiftBits = ((64 - 32) - (Ldbits - 1)) + 1
    //              = 34 - Ldbits
    Ldbits_32x2     = vsub_s32(vdup_n_s32(34), vreinterpret_s32_u32(Ldbits_u32x2));
#endif // SAMPLES_24_BITS
    Ldbits_32x2     = vmax_s32(vdup_n_s32(1), Ldbits_32x2);
    Ldbits_32x2     = vbsl_s32(bsl_u32x2, vdup_n_s32(1), Ldbits_32x2);              // if(TempX == 0) Ldbits = 1
    MaxShiftBits    = vget_lane_s32(Ldbits_32x2, 0);

    STOP_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT)
#ifdef DEBUG_LIMITER_OUTPUT
    if((debug_cpt_samples >= DEBUG_CPT_MIN) && (debug_cpt_samples <= DEBUG_CPT_MAX))
    {
        char string[100];

        debug_write_string("MRDC5B_LIMITER_COMPUTE_MAX_SHIFT_LEFT\n");
        sprintf(string, "MaxShiftBits=%d\n", MaxShiftBits);
        debug_write_string(string);
    }
#endif  // DEBUG_LIMITER_OUTPUT


    START_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_INSERT_NEW_SUBBAND)

    // insert the new subband samples into the lookahead buffers
    RmsMeasure = HeapPtr->Limiter.RmsMeasure;

    LaIdx = (unsigned int) HeapPtr->LimiterLaIdx;
    if(LaIdx + Samples >= lookahead_len)
    {
        cpt1                  = lookahead_len - LaIdx;
        cpt2                  = Samples - cpt1;
        // update index
        HeapPtr->LimiterLaIdx = (int) cpt2;
    }
    else
    {
        cpt1                  = Samples;
        cpt2                  = 0;
        // update index
        HeapPtr->LimiterLaIdx = (int) (LaIdx + Samples);
    }

    LimiterPeakCoef_32x2  = vdup_n_s32(HeapPtr->LimiterPeakAtCoef);                               // LimiterPeakAtCoef, LimiterPeakAtCoef
    LimiterPeakCoef_32x2  = vset_lane_s32(HeapPtr->LimiterPeakReCoef, LimiterPeakCoef_32x2, 1);   // LimiterPeakAtCoef, LimiterPeakReCoef
    Peak_scale_64x1       = vdup_n_s64(HeapPtr->PrevShiftBits - MaxShiftBits);
    Peak_scale_pow_64x1   = vshl_n_s64(Peak_scale_64x1, 1);
    MaxShiftBits_neg_64x1 = vdup_n_s64(-MaxShiftBits);
#ifdef SAMPLES_24_BITS
    MaxShiftBits_hd_64x1  = vdup_n_s64(24 - MaxShiftBits);
#else // SAMPLES_24_BITS
    MaxShiftBits_hd_64x1  = vdup_n_s64(32 - MaxShiftBits);
#endif // SAMPLES_24_BITS
    PeakMax_64x1          = vdup_n_s64(0);

    for(ch = 0; ch < NumMainCh; ch++)
    {
        Ptr  = HeapPtr->MainInBuf[ch];
        Ptr2 = HeapPtr->LimiterLABuf[ch] + LaIdx;  // go to the first valid sample

        Peak_64x1 = vdup_n_s64(HeapPtr->LimiterPeak[ch]);
        if(RmsMeasure)
        {
            // compensate Peak according to the previous shift bits
            Peak_64x1 = vqrshl_s64(Peak_64x1, Peak_scale_pow_64x1);                                 // neg value => shift right rounding

            // rms measure
            for(k = cpt1; k > 0; k--)
            {
                Tmp_64x1        = vld1_s64(Ptr);
                Ptr++;
                vst1_s64(Ptr2, Tmp_64x1);
                Ptr2++;
                Tmp_64x1        = vqrshl_s64(Tmp_64x1, MaxShiftBits_neg_64x1);
                Tmp_64x2        = vcombine_s64(Tmp_64x1, Tmp_64x1);
                Tmp_32x2x2      = vuzp_s32(vget_low_s32(vreinterpretq_s32_s64(Tmp_64x2)), vget_high_s32(vreinterpretq_s32_s64(Tmp_64x2)));
                Tmp_32x2        = Tmp_32x2x2.val[0];                                                // LSB of Tmp_64x2 (MSB is dummy)
                TempX_64x2      = vqdmull_s32(Tmp_32x2, Tmp_32x2);
                TempX_64x1      = vget_low_s64(TempX_64x2);
                diffX_64x1      = vqsub_s64(Peak_64x1, TempX_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63));                 // sign(diffX)
                diffX_64x2      = vcombine_s64(diffX_64x1, diffX_64x1);
                diffX_low_32x2  = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32);                     // wextract_l(diffX), wextract_l(diffX)
                diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32);                                     // wround_L(diffX), wround_L(diffX)
                Tmp_64x2        = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2));    // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef)
                Tmp_64x2        = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2);     // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef))
                Tmp_64x2        = vqaddq_s64(TempX_64x2, Tmp_64x2);
                Peak_64x1       = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2));
                Tmp_64x1        = vqsub_s64(Peak_64x1, PeakMax_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Peak_64x1 - PeakMax_64x1)
                PeakMax_64x1    = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1);
            }
            Ptr2 = HeapPtr->LimiterLABuf[ch];
            for(k = cpt2; k > 0; k--)
            {
                Tmp_64x1        = vld1_s64(Ptr);
                Ptr++;
                vst1_s64(Ptr2, Tmp_64x1);
                Ptr2++;
                Tmp_64x1        = vqrshl_s64(Tmp_64x1, MaxShiftBits_neg_64x1);
                Tmp_64x2        = vcombine_s64(Tmp_64x1, Tmp_64x1);
                Tmp_32x2x2      = vuzp_s32(vget_low_s32(vreinterpretq_s32_s64(Tmp_64x2)), vget_high_s32(vreinterpretq_s32_s64(Tmp_64x2)));
                Tmp_32x2        = Tmp_32x2x2.val[0];                                                // LSB of Tmp_64x2 (MSB is dummy)
                TempX_64x2      = vqdmull_s32(Tmp_32x2, Tmp_32x2);
                TempX_64x1      = vget_low_s64(TempX_64x2);
                diffX_64x1      = vqsub_s64(Peak_64x1, TempX_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63));                 // sign(diffX)
                diffX_64x2      = vcombine_s64(diffX_64x1, diffX_64x1);
                diffX_low_32x2  = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32);                     // wextract_l(diffX), wextract_l(diffX)
                diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32);                                     // wround_L(diffX), wround_L(diffX)
                Tmp_64x2        = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2));    // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef)
                Tmp_64x2        = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2);     // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef))
                Tmp_64x2        = vqaddq_s64(TempX_64x2, Tmp_64x2);
                Peak_64x1       = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2));
                Tmp_64x1        = vqsub_s64(Peak_64x1, PeakMax_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Peak_64x1 - PeakMax_64x1)
                PeakMax_64x1    = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1);
            }
        }
        else
        {
            // compensate Peak according to the previous shift bits
            Peak_64x1  = vqrshl_s64(Peak_64x1, Peak_scale_64x1);

            // amplitude measure
            Zero_s64x1 = vdup_n_s64(0);
            for(k = cpt1; k > 0; k--)
            {
                Tmp_64x1        = vld1_s64(Ptr);
                Ptr++;
                vst1_s64(Ptr2, Tmp_64x1);
                Ptr2++;
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Tmp_64x1)
                TempX_64x1      = vqsub_s64(Zero_s64x1, Tmp_64x1);                                  // -Tmp_64x1
                TempX_64x1      = vbsl_s64(bsl_u64x1, TempX_64x1, Tmp_64x1);
                TempX_64x1      = vqrshl_s64(TempX_64x1, MaxShiftBits_hd_64x1);
                TempX_64x2      = vcombine_s64(TempX_64x1, TempX_64x1);
                diffX_64x1      = vqsub_s64(Peak_64x1, TempX_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63));                 // sign(diffX)
                diffX_64x2      = vcombine_s64(diffX_64x1, diffX_64x1);
                diffX_low_32x2  = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32);                     // wextract_l(diffX), wextract_l(diffX)
                diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32);                                     // wround_L(diffX), wround_L(diffX)
                Tmp_64x2        = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2));    // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef)
                Tmp_64x2        = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2);     // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef))
                Tmp_64x2        = vqaddq_s64(TempX_64x2, Tmp_64x2);
                Peak_64x1       = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2));
                Tmp_64x1        = vqsub_s64(Peak_64x1, PeakMax_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Peak_64x1 - PeakMax_64x1)
                PeakMax_64x1    = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1);
            }
            Ptr2 = HeapPtr->LimiterLABuf[ch];
            for(k = cpt2; k > 0; k--)
            {
                Tmp_64x1        = vld1_s64(Ptr);
                Ptr++;
                vst1_s64(Ptr2, Tmp_64x1);
                Ptr2++;
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Tmp_64x1)
                TempX_64x1      = vqsub_s64(Zero_s64x1, Tmp_64x1);                                  // -Tmp_64x1
                TempX_64x1      = vbsl_s64(bsl_u64x1, TempX_64x1, Tmp_64x1);
                TempX_64x1      = vqrshl_s64(TempX_64x1, MaxShiftBits_hd_64x1);
                TempX_64x2      = vcombine_s64(TempX_64x1, TempX_64x1);
                diffX_64x1      = vqsub_s64(Peak_64x1, TempX_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(diffX_64x1, 63));                 // sign(diffX)
                diffX_64x2      = vcombine_s64(diffX_64x1, diffX_64x1);
                diffX_low_32x2  = vshrn_n_s64(vshlq_n_s64(diffX_64x2, 32), 32);                     // wextract_l(diffX), wextract_l(diffX)
                diffX_high_32x2 = vrshrn_n_s64(diffX_64x2, 32);                                     // wround_L(diffX), wround_L(diffX)
                Tmp_64x2        = vmovl_s32(vqrdmulh_s32(LimiterPeakCoef_32x2, diffX_low_32x2));    // (MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), (MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef)
                Tmp_64x2        = vqdmlal_s32(Tmp_64x2, LimiterPeakCoef_32x2, diffX_high_32x2);     // wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakAtCoef), wL_fmul(wround_L(diffX), LimiterPeakAtCoef)), wL_addsat((MMlong) wfmulr(wextract_l(diffX), LimiterPeakReCoef), wL_fmul(wround_L(diffX), LimiterPeakReCoef))
                Tmp_64x2        = vqaddq_s64(TempX_64x2, Tmp_64x2);
                Peak_64x1       = vbsl_s64(bsl_u64x1, vget_low_s64(Tmp_64x2), vget_high_s64(Tmp_64x2));
                Tmp_64x1        = vqsub_s64(Peak_64x1, PeakMax_64x1);
                bsl_u64x1       = vreinterpret_u64_s64(vshr_n_s64(Tmp_64x1, 63));                   // sign(Peak_64x1 - PeakMax_64x1)
                PeakMax_64x1    = vbsl_s64(bsl_u64x1, PeakMax_64x1, Peak_64x1);
            }
        }

        HeapPtr->LimiterPeak[ch] = vget_lane_s64(Peak_64x1, 0);                                     // save history
    }  // for(ch = 0...)
    PeakMax                = vget_lane_s64(PeakMax_64x1, 0);
    HeapPtr->PrevShiftBits = MaxShiftBits;

    STOP_PMU_MEASURE(PMU_MEASURE_MRDC5B_LIMITER_INSERT_NEW_SUBBAND)


    if(PeakMax < MDRC5B_ALMOST_ZERO_THRESH)
    {
        PeakdB = (MDRC5B_POWER_DB_MINUS_INF << 16); // 8.16, [-128.0, 127.0] dB
    }
    else
    {
        Peak_u32x2x2    = vuzp_u32(vreinterpret_u32_s64(PeakMax_64x1), vreinterpret_u32_s64(PeakMax_64x1));
        bsl_u32x2       = vceq_u32(Peak_u32x2x2.val[1], vdup_n_u32(0));
        Peak_exp_u32x2  = vadd_u32(vclz_u32(Peak_u32x2x2.val[0]), vdup_n_u32(32));
        Peak_exp2_u32x2 = vclz_u32(Peak_u32x2x2.val[1]);
        Peak_exp_u32x2  = vbsl_u32(bsl_u32x2, Peak_exp_u32x2, Peak_exp2_u32x2);
        Peak_mant_u32x2 = vrshrn_n_u64(vshlq_u64(vreinterpretq_u64_s64(vcombine_s64(PeakMax_64x1, PeakMax_64x1)), vreinterpretq_s64_u64(vmovl_u32(Peak_exp_u32x2))), 32);

        // if(Peak_mant >= sqrt(0.5))
        // {
        //     Peak_exp--;
        //     Peak_mant >>= 1;
        // }
        bsl_u32x2       = vcge_u32(Peak_mant_u32x2, vdup_n_u32(0xB504F334));
        Peak_exp_u32x2  = vbsl_u32(bsl_u32x2, vsub_u32(Peak_exp_u32x2, vdup_n_u32(1)), Peak_exp_u32x2);
        Peak_mant_u32x2 = vbsl_u32(bsl_u32x2, vrshr_n_u32(Peak_mant_u32x2, 1), Peak_mant_u32x2);

        Peak_exp_32x2 = vreinterpret_s32_u32(Peak_exp_u32x2);
#ifdef SAMPLES_24_BITS
        // correction of 16 bits if input samples are 24 bits
        Peak_exp_32x2 = vsub_s32(Peak_exp_32x2, vdup_n_s32(16));
#endif // SAMPLES_24_BITS

        // at this point : sqrt(0.5)/2 <= Peak_mant < sqrt(0.5)
        //
        // ln(1+x) = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 - x^10/10 ...    accuracy OK if |x| < 0.5
        // sqrt(0.5)/2 <= Peak_mant < sqrt(0.5)  =>  sqrt(0.5)-1 <= 2*Peak_mant-1 < 2*sqrt(0.5)-1
        //                                       =>  ln(Peak_mant) = ln(1+x)-ln(2) with x=2*Peak_mant-1, i.e. |x| < 0.414214...

        // x=2*PeakMax_mant-1 in Q31
        // => sqrt(0.5)-1 <= x < 2*sqrt(0.5)-1
        x_32x2      = vreinterpret_s32_u32(vsub_u32(Peak_mant_u32x2, vdup_n_u32(0x80000000)));

        PeakdB_32x2 = x_32x2;                                                                     // PeakdB = x

        xn_32x2     = vqrdmulh_s32(x_32x2, x_32x2);                                               // xn = x^2
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 1));                            // PeakdB = x - x^2/2

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^3
        PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x2AAAAAAB)));      // PeakdB = x - x^2/2 + x^3/3

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^4
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 2));                            // PeakdB = x - x^2/2 + x^3/3 - x^4/4

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^5
        PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x1999999A)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^6
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x15555555)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^7
        PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x12492492)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^8
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vrshr_n_s32(xn_32x2, 3));                            // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^9
        PeakdB_32x2 = vqadd_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x0E38E38E)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9

        xn_32x2     = vqrdmulh_s32(xn_32x2, x_32x2);                                              // xn = x^10
        PeakdB_32x2 = vqsub_s32(PeakdB_32x2, vqrdmulh_s32(xn_32x2, vdup_n_s32(0x0CCCCCCD)));      // PeakdB = x - x^2/2 + x^3/3 - x^4/4 + x^5/5 - x^6/6 + x^7/7 - x^8/8 + x^9/9 - x^10/10

        // at this point : PeakMaxdB contains ln(1+x) in Q31

        if(RmsMeasure)
        {
            // dB(power) = 10*log10(power)

            // PeakMaxdB = 10*log10(PeakMax)+20*log10(2)*(HEADROOM+MaxShiftBits)
            //           = 10*ln(PeakMax)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 10/ln(10)*ln(PeakMax_mant*2^(-PeakMax_exp))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 10/ln(10)*(ln(PeakMax_mant)-PeakMax_exp*ln(2))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 10/ln(10)*ln(PeakMax_mant)-PeakMax_exp*10*ln(2)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 10/ln(10)*ln(PeakMax_mant)+10*ln(2)/ln(10)*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp)
            //
            // => RmsdB = 10/ln(10)*ln(1+x)+10*ln(2)/ln(10)*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp)
            // => RmsdB (Q16) = 0x457CB*ln(1+x)+0x302A3*(2*(HEADROOM+MaxShiftBits)-PeakMax_exp)

            // fractional mutiply 0x457CB*ln(1+x) in Q16
            PeakdB_32x2   = vqrdmulh_s32(PeakdB_32x2, vdup_n_s32(0x457CB));

            // PeakdB_exp = 2*(HEADROOM+MaxShiftBits)-PeakdB_exp
            Peak_exp_32x2 = vsub_s32(vdup_n_s32(2 * (HEADROOM + MaxShiftBits)), Peak_exp_32x2);

            // PeakMaxdB final value (integer mac 0x302A3*PeakdB_exp)
            PeakdB_32x2   = vmla_s32(PeakdB_32x2, Peak_exp_32x2, vdup_n_s32(0x302A3));
        }
        else
        {
            // dB(power) = 20*log10(abs)

            // PeakMaxdB = 20*log10(PeakMax)+20*log10(2)*(HEADROOM+MaxShiftBits)
            //           = 20*ln(PeakMax)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 20/ln(10)*ln(PeakMax_mant*2^(-PeakMax_exp))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 20/ln(10)*(ln(PeakMax_mant)-PeakMax_exp*ln(2))+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 20/ln(10)*ln(PeakMax_mant)-PeakMax_exp*20*ln(2)/ln(10)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits)
            //           = 20/ln(10)*ln(PeakMax_mant)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits-PeakMax_exp)
            //
            // => RmsdB = 20/ln(10)*ln(1+x)+20*ln(2)/ln(10)*(HEADROOM+MaxShiftBits-PeakMax_exp)
            // => RmsdB (Q16) = 0x8AF96*ln(1+x)+0x60546*(HEADROOM+MaxShiftBits-PeakMax_exp)

            // fractional mutiply 0x8AF96*ln(1+x) in Q16
            PeakdB_32x2     = vqrdmulh_s32(PeakdB_32x2, vdup_n_s32(0x8AF96));

            // PeakdB_exp = HEADROOM+MaxShiftBits-PeakdB_exp
            Peak_exp_32x2 = vsub_s32(vdup_n_s32(HEADROOM + MaxShiftBits), Peak_exp_32x2);

            // PeakMaxdB final value (integer mac 0x60546*PeakdB_exp)
            PeakdB_32x2     = vmla_s32(PeakdB_32x2, Peak_exp_32x2, vdup_n_s32(0x60546));
        }
        PeakdB = vget_lane_s32(PeakdB_32x2, 0);
    }
#ifdef DEBUG_LIMITER_OUTPUT
    if((debug_cpt_samples >= DEBUG_CPT_MIN) && (debug_cpt_samples <= DEBUG_CPT_MAX))
    {
        char string[100];

        debug_write_string("MRDC5B_LIMITER_PEAKMAX_PEAKDB\n");
        sprintf(string, "PeakMax=0x%012llX, HEADROOM+MaxShiftBits=%d => PeakdB=0x%06X\n",
#ifdef SAMPLES_24_BITS
                        PeakMax & 0xFFFFFFFFFFFFLL,
#else // SAMPLES_24_BITS
                        (PeakMax >> 16) & 0xFFFFFFFFFFFFLL,
#endif // SAMPLES_24_BITS
                        HEADROOM + MaxShiftBits,
                        PeakdB & 0xFFFFFF);
        debug_write_string(string);
    }
/*
 * 16x16 sub-pixel variance (NEON).
 *
 * Bilinearly filters a 16x16 source block at sub-pixel position
 * (xoffset, yoffset) -- both are used as row indices into
 * bilinear_taps_coeff; presumably the 0..7 eighth-pel positions, but the
 * table is defined elsewhere, confirm against its definition -- and then
 * computes the variance of the filtered block against the 16x16
 * reference block at dst_ptr.
 *
 * Parameters:
 *   src_ptr / src_pixels_per_line  source pixels and their row stride
 *   xoffset / yoffset              horizontal / vertical filter position
 *   dst_ptr / dst_pixels_per_line  reference pixels and their row stride
 *   sse                            out: sum of squared differences
 *
 * Returns: variance = sse - (sum*sum)/256 (256 = 16*16 pixels; the
 * division appears as the >> 8 at the end of the function).
 */
unsigned int vp8_sub_pixel_variance16x16_neon_func(
        const unsigned char *src_ptr,
        int src_pixels_per_line,
        int xoffset,
        int yoffset,
        const unsigned char *dst_ptr,
        int dst_pixels_per_line,
        unsigned int *sse) {
    int i;
    /* Scratch layout: tmp[0..271] holds the 17x16 first-pass output
     * (17 rows are needed so the vertical pass has a row below the
     * last output row); tmp[272..527] holds the final 16x16 filtered
     * block read back by the variance loop. */
    DECLARE_ALIGNED_ARRAY(16, unsigned char, tmp, 528);
    unsigned char *tmpp;
    unsigned char *tmpp2;
    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
    uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d16u8, d17u8, d18u8;
    uint8x8_t d19u8, d20u8, d21u8;
    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
    uint32x2_t d0u32, d10u32;
    int64x1_t d0s64, d1s64, d2s64, d3s64;
    uint8x16_t q0u8, q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8;
    uint8x16_t q10u8, q11u8, q12u8, q13u8, q14u8, q15u8;
    uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16, q6u16, q7u16, q8u16;
    uint16x8_t q9u16, q10u16, q11u16, q12u16, q13u16, q14u16;
    int32x4_t q8s32, q9s32, q10s32;
    int64x2_t q0s64, q1s64, q5s64;

    tmpp2 = tmp + 272;
    tmpp = tmp;
    /* xoffset == 0: the horizontal filter is a no-op, so run only the
     * vertical (second-pass) bilinear filter directly from src into
     * tmp + 272.  Each output row blends a row with the row below it. */
    if (xoffset == 0) {  // secondpass_bfilter16x16_only
        d0u8 = vdup_n_u8(bilinear_taps_coeff[yoffset][0]);
        d1u8 = vdup_n_u8(bilinear_taps_coeff[yoffset][1]);

        q11u8 = vld1q_u8(src_ptr);
        src_ptr += src_pixels_per_line;
        for (i = 4; i > 0; i--) {
            /* Load the next four source rows; q11u8 carries the row
             * above from the previous iteration. */
            q12u8 = vld1q_u8(src_ptr);
            src_ptr += src_pixels_per_line;
            q13u8 = vld1q_u8(src_ptr);
            src_ptr += src_pixels_per_line;
            q14u8 = vld1q_u8(src_ptr);
            src_ptr += src_pixels_per_line;
            q15u8 = vld1q_u8(src_ptr);
            src_ptr += src_pixels_per_line;

            __builtin_prefetch(src_ptr);
            __builtin_prefetch(src_ptr + src_pixels_per_line);
            __builtin_prefetch(src_ptr + src_pixels_per_line * 2);

            /* row * tap0 ... */
            q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
            q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
            q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
            q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
            q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
            q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
            q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
            q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);

            /* ... + next row * tap1 */
            q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
            q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
            q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
            q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
            q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
            q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
            q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
            q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);

            /* Narrow back to 8 bits with rounding; >> 7 matches the
             * filter's 128-sum tap scale. */
            d2u8 = vqrshrn_n_u16(q1u16, 7);
            d3u8 = vqrshrn_n_u16(q2u16, 7);
            d4u8 = vqrshrn_n_u16(q3u16, 7);
            d5u8 = vqrshrn_n_u16(q4u16, 7);
            d6u8 = vqrshrn_n_u16(q5u16, 7);
            d7u8 = vqrshrn_n_u16(q6u16, 7);
            d8u8 = vqrshrn_n_u16(q7u16, 7);
            d9u8 = vqrshrn_n_u16(q8u16, 7);

            q1u8 = vcombine_u8(d2u8, d3u8);
            q2u8 = vcombine_u8(d4u8, d5u8);
            q3u8 = vcombine_u8(d6u8, d7u8);
            q4u8 = vcombine_u8(d8u8, d9u8);

            /* Last loaded row becomes the "row above" next iteration. */
            q11u8 = q15u8;

            vst1q_u8((uint8_t *)tmpp2, q1u8);
            tmpp2 += 16;
            vst1q_u8((uint8_t *)tmpp2, q2u8);
            tmpp2 += 16;
            vst1q_u8((uint8_t *)tmpp2, q3u8);
            tmpp2 += 16;
            vst1q_u8((uint8_t *)tmpp2, q4u8);
            tmpp2 += 16;
        }
    /* yoffset == 0: only the horizontal (first-pass) filter is needed;
     * write its 16 rows straight into tmp + 272.  Each row loads
     * 17 bytes (via three d-registers) so vext can form the
     * one-pixel-right neighbour. */
    } else if (yoffset == 0) {  // firstpass_bfilter16x16_only
        d0u8 = vdup_n_u8(bilinear_taps_coeff[xoffset][0]);
        d1u8 = vdup_n_u8(bilinear_taps_coeff[xoffset][1]);

        for (i = 4; i > 0 ; i--) {
            d2u8 = vld1_u8(src_ptr);
            d3u8 = vld1_u8(src_ptr + 8);
            d4u8 = vld1_u8(src_ptr + 16);
            src_ptr += src_pixels_per_line;
            d5u8 = vld1_u8(src_ptr);
            d6u8 = vld1_u8(src_ptr + 8);
            d7u8 = vld1_u8(src_ptr + 16);
            src_ptr += src_pixels_per_line;
            d8u8 = vld1_u8(src_ptr);
            d9u8 = vld1_u8(src_ptr + 8);
            d10u8 = vld1_u8(src_ptr + 16);
            src_ptr += src_pixels_per_line;
            d11u8 = vld1_u8(src_ptr);
            d12u8 = vld1_u8(src_ptr + 8);
            d13u8 = vld1_u8(src_ptr + 16);
            src_ptr += src_pixels_per_line;

            __builtin_prefetch(src_ptr);
            __builtin_prefetch(src_ptr + src_pixels_per_line);
            __builtin_prefetch(src_ptr + src_pixels_per_line * 2);

            /* pixel * tap0 for the left halves / right halves of 4 rows */
            q7u16  = vmull_u8(d2u8, d0u8);
            q8u16  = vmull_u8(d3u8, d0u8);
            q9u16  = vmull_u8(d5u8, d0u8);
            q10u16 = vmull_u8(d6u8, d0u8);
            q11u16 = vmull_u8(d8u8, d0u8);
            q12u16 = vmull_u8(d9u8, d0u8);
            q13u16 = vmull_u8(d11u8, d0u8);
            q14u16 = vmull_u8(d12u8, d0u8);

            /* shift each vector left by one pixel to get x+1 neighbours */
            d2u8  = vext_u8(d2u8, d3u8, 1);
            d5u8  = vext_u8(d5u8, d6u8, 1);
            d8u8  = vext_u8(d8u8, d9u8, 1);
            d11u8 = vext_u8(d11u8, d12u8, 1);

            q7u16  = vmlal_u8(q7u16, d2u8, d1u8);
            q9u16  = vmlal_u8(q9u16, d5u8, d1u8);
            q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
            q13u16 = vmlal_u8(q13u16, d11u8, d1u8);

            d3u8  = vext_u8(d3u8, d4u8, 1);
            d6u8  = vext_u8(d6u8, d7u8, 1);
            d9u8  = vext_u8(d9u8, d10u8, 1);
            d12u8 = vext_u8(d12u8, d13u8, 1);

            q8u16  = vmlal_u8(q8u16,  d3u8, d1u8);
            q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
            q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
            q14u16 = vmlal_u8(q14u16, d12u8, d1u8);

            d14u8 = vqrshrn_n_u16(q7u16, 7);
            d15u8 = vqrshrn_n_u16(q8u16, 7);
            d16u8 = vqrshrn_n_u16(q9u16, 7);
            d17u8 = vqrshrn_n_u16(q10u16, 7);
            d18u8 = vqrshrn_n_u16(q11u16, 7);
            d19u8 = vqrshrn_n_u16(q12u16, 7);
            d20u8 = vqrshrn_n_u16(q13u16, 7);
            d21u8 = vqrshrn_n_u16(q14u16, 7);

            q7u8  = vcombine_u8(d14u8, d15u8);
            q8u8  = vcombine_u8(d16u8, d17u8);
            q9u8  = vcombine_u8(d18u8, d19u8);
            q10u8 = vcombine_u8(d20u8, d21u8);

            vst1q_u8((uint8_t *)tmpp2, q7u8);
            tmpp2 += 16;
            vst1q_u8((uint8_t *)tmpp2, q8u8);
            tmpp2 += 16;
            vst1q_u8((uint8_t *)tmpp2, q9u8);
            tmpp2 += 16;
            vst1q_u8((uint8_t *)tmpp2, q10u8);
            tmpp2 += 16;
        }
    /* General case: horizontal first pass (17 rows into tmp), then
     * vertical second pass (16 rows into tmp + 272). */
    } else {
        d0u8 = vdup_n_u8(bilinear_taps_coeff[xoffset][0]);
        d1u8 = vdup_n_u8(bilinear_taps_coeff[xoffset][1]);

        d2u8 = vld1_u8(src_ptr);
        d3u8 = vld1_u8(src_ptr + 8);
        d4u8 = vld1_u8(src_ptr + 16);
        src_ptr += src_pixels_per_line;
        d5u8 = vld1_u8(src_ptr);
        d6u8 = vld1_u8(src_ptr + 8);
        d7u8 = vld1_u8(src_ptr + 16);
        src_ptr += src_pixels_per_line;
        d8u8 = vld1_u8(src_ptr);
        d9u8 = vld1_u8(src_ptr + 8);
        d10u8 = vld1_u8(src_ptr + 16);
        src_ptr += src_pixels_per_line;
        d11u8 = vld1_u8(src_ptr);
        d12u8 = vld1_u8(src_ptr + 8);
        d13u8 = vld1_u8(src_ptr + 16);
        src_ptr += src_pixels_per_line;

        // First Pass: output_height lines x output_width columns (17x16)
        /* 3 iterations x 4 rows = 12 rows here; the remaining 5 of the
         * 17 first-pass rows are handled after the loop. */
        for (i = 3; i > 0; i--) {
            q7u16  = vmull_u8(d2u8, d0u8);
            q8u16  = vmull_u8(d3u8, d0u8);
            q9u16  = vmull_u8(d5u8, d0u8);
            q10u16 = vmull_u8(d6u8, d0u8);
            q11u16 = vmull_u8(d8u8, d0u8);
            q12u16 = vmull_u8(d9u8, d0u8);
            q13u16 = vmull_u8(d11u8, d0u8);
            q14u16 = vmull_u8(d12u8, d0u8);

            /* x+1 neighbours via one-byte extract */
            d2u8  = vext_u8(d2u8, d3u8, 1);
            d5u8  = vext_u8(d5u8, d6u8, 1);
            d8u8  = vext_u8(d8u8, d9u8, 1);
            d11u8 = vext_u8(d11u8, d12u8, 1);

            q7u16  = vmlal_u8(q7u16, d2u8, d1u8);
            q9u16  = vmlal_u8(q9u16, d5u8, d1u8);
            q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
            q13u16 = vmlal_u8(q13u16, d11u8, d1u8);

            d3u8  = vext_u8(d3u8, d4u8, 1);
            d6u8  = vext_u8(d6u8, d7u8, 1);
            d9u8  = vext_u8(d9u8, d10u8, 1);
            d12u8 = vext_u8(d12u8, d13u8, 1);

            q8u16  = vmlal_u8(q8u16,  d3u8, d1u8);
            q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
            q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
            q14u16 = vmlal_u8(q14u16, d12u8, d1u8);

            d14u8 = vqrshrn_n_u16(q7u16, 7);
            d15u8 = vqrshrn_n_u16(q8u16, 7);
            d16u8 = vqrshrn_n_u16(q9u16, 7);
            d17u8 = vqrshrn_n_u16(q10u16, 7);
            d18u8 = vqrshrn_n_u16(q11u16, 7);
            d19u8 = vqrshrn_n_u16(q12u16, 7);
            d20u8 = vqrshrn_n_u16(q13u16, 7);
            d21u8 = vqrshrn_n_u16(q14u16, 7);

            /* Preload the next four rows before storing results. */
            d2u8 = vld1_u8(src_ptr);
            d3u8 = vld1_u8(src_ptr + 8);
            d4u8 = vld1_u8(src_ptr + 16);
            src_ptr += src_pixels_per_line;
            d5u8 = vld1_u8(src_ptr);
            d6u8 = vld1_u8(src_ptr + 8);
            d7u8 = vld1_u8(src_ptr + 16);
            src_ptr += src_pixels_per_line;
            d8u8 = vld1_u8(src_ptr);
            d9u8 = vld1_u8(src_ptr + 8);
            d10u8 = vld1_u8(src_ptr + 16);
            src_ptr += src_pixels_per_line;
            d11u8 = vld1_u8(src_ptr);
            d12u8 = vld1_u8(src_ptr + 8);
            d13u8 = vld1_u8(src_ptr + 16);
            src_ptr += src_pixels_per_line;

            q7u8 = vcombine_u8(d14u8, d15u8);
            q8u8 = vcombine_u8(d16u8, d17u8);
            q9u8 = vcombine_u8(d18u8, d19u8);
            q10u8 = vcombine_u8(d20u8, d21u8);

            vst1q_u8((uint8_t *)tmpp, q7u8);
            tmpp += 16;
            vst1q_u8((uint8_t *)tmpp, q8u8);
            tmpp += 16;
            vst1q_u8((uint8_t *)tmpp, q9u8);
            tmpp += 16;
            vst1q_u8((uint8_t *)tmpp, q10u8);
            tmpp += 16;
        }

        // First-pass filtering for rest 5 lines
        d14u8 = vld1_u8(src_ptr);
        d15u8 = vld1_u8(src_ptr + 8);
        d16u8 = vld1_u8(src_ptr + 16);
        src_ptr += src_pixels_per_line;

        /* Rows from the loop's preload (d2..d13) plus the row just
         * loaded (d14..d16) give the final 5 rows. */
        q9u16  = vmull_u8(d2u8, d0u8);
        q10u16 = vmull_u8(d3u8, d0u8);
        q11u16 = vmull_u8(d5u8, d0u8);
        q12u16 = vmull_u8(d6u8, d0u8);
        q13u16 = vmull_u8(d8u8, d0u8);
        q14u16 = vmull_u8(d9u8, d0u8);

        d2u8  = vext_u8(d2u8, d3u8, 1);
        d5u8  = vext_u8(d5u8, d6u8, 1);
        d8u8  = vext_u8(d8u8, d9u8, 1);

        q9u16  = vmlal_u8(q9u16, d2u8, d1u8);
        q11u16 = vmlal_u8(q11u16, d5u8, d1u8);
        q13u16 = vmlal_u8(q13u16, d8u8, d1u8);

        d3u8  = vext_u8(d3u8, d4u8, 1);
        d6u8  = vext_u8(d6u8, d7u8, 1);
        d9u8  = vext_u8(d9u8, d10u8, 1);

        q10u16 = vmlal_u8(q10u16, d3u8, d1u8);
        q12u16 = vmlal_u8(q12u16, d6u8, d1u8);
        q14u16 = vmlal_u8(q14u16, d9u8, d1u8);

        q1u16 = vmull_u8(d11u8, d0u8);
        q2u16 = vmull_u8(d12u8, d0u8);
        q3u16 = vmull_u8(d14u8, d0u8);
        q4u16 = vmull_u8(d15u8, d0u8);

        d11u8 = vext_u8(d11u8, d12u8, 1);
        d14u8 = vext_u8(d14u8, d15u8, 1);

        q1u16 = vmlal_u8(q1u16, d11u8, d1u8);
        q3u16 = vmlal_u8(q3u16, d14u8, d1u8);

        d12u8 = vext_u8(d12u8, d13u8, 1);
        d15u8 = vext_u8(d15u8, d16u8, 1);

        q2u16 = vmlal_u8(q2u16, d12u8, d1u8);
        q4u16 = vmlal_u8(q4u16, d15u8, d1u8);

        d10u8 = vqrshrn_n_u16(q9u16, 7);
        d11u8 = vqrshrn_n_u16(q10u16, 7);
        d12u8 = vqrshrn_n_u16(q11u16, 7);
        d13u8 = vqrshrn_n_u16(q12u16, 7);
        d14u8 = vqrshrn_n_u16(q13u16, 7);
        d15u8 = vqrshrn_n_u16(q14u16, 7);
        d16u8 = vqrshrn_n_u16(q1u16, 7);
        d17u8 = vqrshrn_n_u16(q2u16, 7);
        d18u8 = vqrshrn_n_u16(q3u16, 7);
        d19u8 = vqrshrn_n_u16(q4u16, 7);

        q5u8 = vcombine_u8(d10u8, d11u8);
        q6u8 = vcombine_u8(d12u8, d13u8);
        q7u8 = vcombine_u8(d14u8, d15u8);
        q8u8 = vcombine_u8(d16u8, d17u8);
        q9u8 = vcombine_u8(d18u8, d19u8);

        vst1q_u8((uint8_t *)tmpp, q5u8);
        tmpp += 16;
        vst1q_u8((uint8_t *)tmpp, q6u8);
        tmpp += 16;
        vst1q_u8((uint8_t *)tmpp, q7u8);
        tmpp += 16;
        vst1q_u8((uint8_t *)tmpp, q8u8);
        tmpp += 16;
        vst1q_u8((uint8_t *)tmpp, q9u8);

        // secondpass_filter
        /* Vertical pass over the 17 first-pass rows in tmp, writing the
         * final 16x16 block to tmp + 272.  Same structure as the
         * xoffset == 0 branch, but reading from tmp. */
        d0u8 = vdup_n_u8(bilinear_taps_coeff[yoffset][0]);
        d1u8 = vdup_n_u8(bilinear_taps_coeff[yoffset][1]);

        tmpp = tmp;
        tmpp2 = tmpp + 272;
        q11u8 = vld1q_u8(tmpp);
        tmpp += 16;
        for (i = 4; i > 0; i--) {
            q12u8 = vld1q_u8(tmpp);
            tmpp += 16;
            q13u8 = vld1q_u8(tmpp);
            tmpp += 16;
            q14u8 = vld1q_u8(tmpp);
            tmpp += 16;
            q15u8 = vld1q_u8(tmpp);
            tmpp += 16;

            q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
            q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
            q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
            q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
            q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
            q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
            q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
            q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);

            q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
            q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
            q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
            q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
            q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
            q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
            q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
            q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);

            d2u8 = vqrshrn_n_u16(q1u16, 7);
            d3u8 = vqrshrn_n_u16(q2u16, 7);
            d4u8 = vqrshrn_n_u16(q3u16, 7);
            d5u8 = vqrshrn_n_u16(q4u16, 7);
            d6u8 = vqrshrn_n_u16(q5u16, 7);
            d7u8 = vqrshrn_n_u16(q6u16, 7);
            d8u8 = vqrshrn_n_u16(q7u16, 7);
            d9u8 = vqrshrn_n_u16(q8u16, 7);

            q1u8 = vcombine_u8(d2u8, d3u8);
            q2u8 = vcombine_u8(d4u8, d5u8);
            q3u8 = vcombine_u8(d6u8, d7u8);
            q4u8 = vcombine_u8(d8u8, d9u8);

            q11u8 = q15u8;

            vst1q_u8((uint8_t *)tmpp2, q1u8);
            tmpp2 += 16;
            vst1q_u8((uint8_t *)tmpp2, q2u8);
            tmpp2 += 16;
            vst1q_u8((uint8_t *)tmpp2, q3u8);
            tmpp2 += 16;
            vst1q_u8((uint8_t *)tmpp2, q4u8);
            tmpp2 += 16;
        }
    }

    // sub_pixel_variance16x16_neon
    /* q8s32 accumulates the signed differences (sum), q9s32/q10s32 the
     * squared differences (split low/high to keep vmlal in 4 lanes). */
    q8s32 = vdupq_n_s32(0);
    q9s32 = vdupq_n_s32(0);
    q10s32 = vdupq_n_s32(0);

    tmpp = tmp + 272;  /* filtered 16x16 block produced above */
    for (i = 0; i < 8; i++) {  // sub_pixel_variance16x16_neon_loop
        q0u8 = vld1q_u8(tmpp);
        tmpp += 16;
        q1u8 = vld1q_u8(tmpp);
        tmpp += 16;
        q2u8 = vld1q_u8(dst_ptr);
        dst_ptr += dst_pixels_per_line;
        q3u8 = vld1q_u8(dst_ptr);
        dst_ptr += dst_pixels_per_line;

        d0u8 = vget_low_u8(q0u8);
        d1u8 = vget_high_u8(q0u8);
        d2u8 = vget_low_u8(q1u8);
        d3u8 = vget_high_u8(q1u8);

        /* Widened differences; the u16 wraparound for negative values
         * is undone by reinterpreting as s16 below. */
        q11u16 = vsubl_u8(d0u8, vget_low_u8(q2u8));
        q12u16 = vsubl_u8(d1u8, vget_high_u8(q2u8));
        q13u16 = vsubl_u8(d2u8, vget_low_u8(q3u8));
        q14u16 = vsubl_u8(d3u8, vget_high_u8(q3u8));

        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);

        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);

        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);

        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
    }

    /* Horizontal reduction of sum and sse to scalars. */
    q10s32 = vaddq_s32(q10s32, q9s32);
    q0s64 = vpaddlq_s32(q8s32);
    q1s64 = vpaddlq_s32(q10s32);

    d0s64 = vget_low_s64(q0s64);
    d1s64 = vget_high_s64(q0s64);
    d2s64 = vget_low_s64(q1s64);
    d3s64 = vget_high_s64(q1s64);
    d0s64 = vadd_s64(d0s64, d1s64);
    d1s64 = vadd_s64(d2s64, d3s64);

    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
                      vreinterpret_s32_s64(d0s64));
    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);

    /* variance = sse - (sum*sum) / 256  (>> 8 for the 16x16 block) */
    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 8);
    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);

    return vget_lane_u32(d0u32, 0);
}
Exemple #5
0
/* Fold one 8-lane block of widened (src - ref) pixel differences into the
 * running accumulators: *sum gets the signed sum of the lanes, and the
 * squared lanes are added into *sq_lo / *sq_hi (kept as two registers so
 * each vmlal works on 4 lanes). */
static inline void variance_acc_u16x8(uint16x8_t diff, int32x4_t *sum,
                                      int32x4_t *sq_lo, int32x4_t *sq_hi) {
    int16x4_t lo = vreinterpret_s16_u16(vget_low_u16(diff));
    int16x4_t hi = vreinterpret_s16_u16(vget_high_u16(diff));
    *sum = vpadalq_s16(*sum, vreinterpretq_s16_u16(diff));
    *sq_lo = vmlal_s16(*sq_lo, lo, lo);
    *sq_hi = vmlal_s16(*sq_hi, hi, hi);
}

/* Variance of a 16-wide by 8-high block.  *sse receives the sum of
 * squared differences; the return value is sse - (sum*sum)/128
 * (128 = 16*8 pixels, hence the shift by 7). */
unsigned int vp8_variance16x8_neon(
        const unsigned char *src_ptr,
        int source_stride,
        const unsigned char *ref_ptr,
        int recon_stride,
        unsigned int *sse) {
    int pair;
    int32x4_t sum_s32 = vdupq_n_s32(0);
    int32x4_t sq_lo_s32 = vdupq_n_s32(0);
    int32x4_t sq_hi_s32 = vdupq_n_s32(0);
    int32x4_t sq_s32;
    int64x2_t sum_q64, sq_q64;
    int64x1_t sum_d64, sq_d64;
    int64x2_t sum_sq_q64;
    uint32x2_t correction_u32, variance_u32;

    /* Each iteration consumes two full rows from both buffers. */
    for (pair = 0; pair < 4; pair++) {
        uint8x16_t s0 = vld1q_u8(src_ptr);
        src_ptr += source_stride;
        uint8x16_t s1 = vld1q_u8(src_ptr);
        src_ptr += source_stride;
        __builtin_prefetch(src_ptr);

        uint8x16_t r0 = vld1q_u8(ref_ptr);
        ref_ptr += recon_stride;
        uint8x16_t r1 = vld1q_u8(ref_ptr);
        ref_ptr += recon_stride;
        __builtin_prefetch(ref_ptr);

        variance_acc_u16x8(vsubl_u8(vget_low_u8(s0), vget_low_u8(r0)),
                           &sum_s32, &sq_lo_s32, &sq_hi_s32);
        variance_acc_u16x8(vsubl_u8(vget_high_u8(s0), vget_high_u8(r0)),
                           &sum_s32, &sq_lo_s32, &sq_hi_s32);
        variance_acc_u16x8(vsubl_u8(vget_low_u8(s1), vget_low_u8(r1)),
                           &sum_s32, &sq_lo_s32, &sq_hi_s32);
        variance_acc_u16x8(vsubl_u8(vget_high_u8(s1), vget_high_u8(r1)),
                           &sum_s32, &sq_lo_s32, &sq_hi_s32);
    }

    /* Reduce the 4-lane accumulators to scalars. */
    sq_s32 = vaddq_s32(sq_hi_s32, sq_lo_s32);
    sum_q64 = vpaddlq_s32(sum_s32);
    sq_q64 = vpaddlq_s32(sq_s32);

    sum_d64 = vadd_s64(vget_low_s64(sum_q64), vget_high_s64(sum_q64));
    sq_d64 = vadd_s64(vget_low_s64(sq_q64), vget_high_s64(sq_q64));

    sum_sq_q64 = vmull_s32(vreinterpret_s32_s64(sum_d64),
                           vreinterpret_s32_s64(sum_d64));
    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(sq_d64), 0);

    /* variance = sse - sum^2 / 128 */
    correction_u32 =
        vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(sum_sq_q64)), 7);
    variance_u32 = vsub_u32(vreinterpret_u32_s64(sq_d64), correction_u32);

    return vget_lane_u32(variance_u32, 0);
}