Example #1
File: dct-c.c Project: 0x0B501E7E/x264
static void avc_dct4x4dc_msa( int16_t *p_src, int16_t *p_dst,
                              int32_t i_src_stride )
{
    v8i16 src0, src1, src2, src3, ver_res0, ver_res1, ver_res2, ver_res3;
    v4i32 src0_r, src1_r, src2_r, src3_r, tmp0, tmp1, tmp2, tmp3;
    v4i32 hor_res0, hor_res1, hor_res2, hor_res3;
    v4i32 ver_res0_r, ver_res1_r, ver_res2_r, ver_res3_r;

    /* load the four rows of DC coefficients and sign-extend each to 32 bits */
    LD_SH4( p_src, i_src_stride, src0, src1, src2, src3 );
    UNPCK_R_SH_SW( src0, src0_r );
    UNPCK_R_SH_SW( src1, src1_r );
    UNPCK_R_SH_SW( src2, src2_r );
    UNPCK_R_SH_SW( src3, src3_r );
    /* first 1-D Hadamard butterfly pass, element-wise across the rows */
    BUTTERFLY_4( src0_r, src2_r, src3_r, src1_r,
                 tmp0, tmp3, tmp2, tmp1 );
    BUTTERFLY_4( tmp0, tmp1, tmp2, tmp3,
                 hor_res0, hor_res3, hor_res2, hor_res1 );
    /* transpose so the second pass covers the other dimension */
    TRANSPOSE4x4_SW_SW( hor_res0, hor_res1, hor_res2, hor_res3,
                        hor_res0, hor_res1, hor_res2, hor_res3 );
    /* second 1-D butterfly pass */
    BUTTERFLY_4( hor_res0, hor_res2, hor_res3, hor_res1,
                 tmp0, tmp3, tmp2, tmp1 );
    BUTTERFLY_4( tmp0, tmp1, tmp2, tmp3,
                 ver_res0_r, ver_res3_r, ver_res2_r, ver_res1_r );
    /* rounded halving: ( x + 1 ) >> 1 */
    SRARI_W4_SW( ver_res0_r, ver_res1_r, ver_res2_r, ver_res3_r, 1 );
    /* narrow back to 16 bits and store the 4x4 block contiguously */
    PCKEV_H4_SH( ver_res0_r, ver_res0_r, ver_res1_r, ver_res1_r,
                 ver_res2_r, ver_res2_r, ver_res3_r, ver_res3_r,
                 ver_res0, ver_res1, ver_res2, ver_res3 );
    PCKOD_D2_SH( ver_res1, ver_res0, ver_res3, ver_res2, ver_res0, ver_res2 );
    ST_SH2( ver_res0, ver_res2, p_dst, 8 );
}
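
A minimal scalar sketch of the same 4x4 DC Hadamard transform, for reference. It follows the shape of x264's portable C path but works on a dense in-place 16-element block instead of the strided source and separate destination above; the name dct4x4dc_ref is illustrative.

#include <stdint.h>

static void dct4x4dc_ref( int16_t d[16] )
{
    int16_t tmp[16];
    for( int i = 0; i < 4; i++ )
    {
        int s01 = d[i*4+0] + d[i*4+1], d01 = d[i*4+0] - d[i*4+1];
        int s23 = d[i*4+2] + d[i*4+3], d23 = d[i*4+2] - d[i*4+3];
        /* storing transposed folds the transpose into the first pass */
        tmp[0*4+i] = s01 + s23;
        tmp[1*4+i] = s01 - s23;
        tmp[2*4+i] = d01 - d23;
        tmp[3*4+i] = d01 + d23;
    }
    for( int i = 0; i < 4; i++ )
    {
        int s01 = tmp[i*4+0] + tmp[i*4+1], d01 = tmp[i*4+0] - tmp[i*4+1];
        int s23 = tmp[i*4+2] + tmp[i*4+3], d23 = tmp[i*4+2] - tmp[i*4+3];
        /* SRARI_W4_SW( ..., 1 ) above performs this rounded halving */
        d[i*4+0] = ( s01 + s23 + 1 ) >> 1;
        d[i*4+1] = ( s01 - s23 + 1 ) >> 1;
        d[i*4+2] = ( d01 - d23 + 1 ) >> 1;
        d[i*4+3] = ( d01 + d23 + 1 ) >> 1;
    }
}
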
Example #2
File: dct-c.c Project: 0x0B501E7E/x264
static void avc_idct4x4dc_msa( int16_t *p_src, int32_t i_src_stride,
                               int16_t *p_dst, int32_t i_dst_stride )
{
    v8i16 src0, src1, src2, src3;
    v4i32 src0_r, src1_r, src2_r, src3_r;
    v4i32 hres0, hres1, hres2, hres3;
    v8i16 vres0, vres1, vres2, vres3;
    v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v2i64 res0, res1;

    LD_SH4( p_src, i_src_stride, src0, src1, src2, src3 );
    UNPCK_R_SH_SW( src0, src0_r );
    UNPCK_R_SH_SW( src1, src1_r );
    UNPCK_R_SH_SW( src2, src2_r );
    UNPCK_R_SH_SW( src3, src3_r );
    /* same butterfly/transpose/butterfly network as the forward DC transform */
    BUTTERFLY_4( src0_r, src2_r, src3_r, src1_r, vec0, vec3, vec2, vec1 );
    BUTTERFLY_4( vec0, vec1, vec2, vec3, hres0, hres3, hres2, hres1 );
    TRANSPOSE4x4_SW_SW( hres0, hres1, hres2, hres3,
                        hres0, hres1, hres2, hres3 );
    BUTTERFLY_4( hres0, hres2, hres3, hres1, vec0, vec3, vec2, vec1 );
    BUTTERFLY_4( vec0, vec1, vec2, vec3, vec4, vec7, vec6, vec5 );
    /* no rounding shift on the inverse: pack straight back to 16 bits */
    PCKEV_H4_SH( vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7,
                 vres0, vres1, vres2, vres3 );
    PCKOD_D2_SD( vres1, vres0, vres3, vres2, res0, res1 );
    ST8x4_UB( res0, res1, p_dst, i_dst_stride * 2 );
}
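
The inverse DC transform runs the identical butterfly network; the only arithmetic difference from the forward version is the output stage, which stores the butterfly results without the rounded halving (in H.264 that scaling is handled during dequantization instead). In terms of the scalar sketch above, the second pass would end with plain assignments:

        d[i*4+0] = s01 + s23;
        d[i*4+1] = s01 - s23;
        d[i*4+2] = d01 - d23;
        d[i*4+3] = d01 + d23;
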
Example #3
void aom_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
  v8i16 in0, in1, in2, in3;
  v4i32 in0_r, in1_r, in2_r, in3_r, in4_r;

  /* load vector elements of 4x4 block */
  LD4x4_SH(input, in0, in2, in3, in1);
  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);
  UNPCK_R_SH_SW(in0, in0_r);
  UNPCK_R_SH_SW(in2, in2_r);
  UNPCK_R_SH_SW(in3, in3_r);
  UNPCK_R_SH_SW(in1, in1_r);
  SRA_4V(in0_r, in1_r, in2_r, in3_r, UNIT_QUANT_SHIFT);

  /* first lifting pass of the inverse WHT */
  in0_r += in2_r;
  in3_r -= in1_r;
  in4_r = (in0_r - in3_r) >> 1;
  in1_r = in4_r - in1_r;
  in2_r = in4_r - in2_r;
  in0_r -= in1_r;
  in3_r += in2_r;

  TRANSPOSE4x4_SW_SW(in0_r, in1_r, in2_r, in3_r, in0_r, in1_r, in2_r, in3_r);

  /* second lifting pass, after the transpose */
  in0_r += in1_r;
  in2_r -= in3_r;
  in4_r = (in0_r - in2_r) >> 1;
  in3_r = in4_r - in3_r;
  in1_r = in4_r - in1_r;
  in0_r -= in3_r;
  in2_r += in1_r;

  PCKEV_H4_SH(in0_r, in0_r, in1_r, in1_r, in2_r, in2_r, in3_r, in3_r, in0, in1,
              in2, in3);
  /* add the residual to the predictor and store with clipping */
  ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride);
}
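
Each lifting block above is one four-element pass of the inverse Walsh-Hadamard transform, applied once per dimension around the transpose, after the input has been scaled down by UNIT_QUANT_SHIFT. A scalar sketch of a single pass (helper name illustrative; libaom's scalar reference reads its four inputs in a/c/d/b order, which is why the vectors are loaded as in0, in2, in3, in1):

#include <stdint.h>

static void iwht4_lift_ref(int32_t x[4]) {
  int32_t a = x[0], c = x[1], d = x[2], b = x[3], e;
  a += c;
  d -= b;
  e = (a - d) >> 1;  /* shared lifting term */
  b = e - b;
  c = e - c;
  a -= b;
  d += c;
  x[0] = a;
  x[1] = b;
  x[2] = c;
  x[3] = d;
}
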
Example #4
File: dct-c.c Project: 0x0B501E7E/x264
static void avc_idct8_addblk_msa( uint8_t *p_dst, int16_t *p_src,
                                  int32_t i_dst_stride )
{
    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 vec0, vec1, vec2, vec3;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v4i32 tmp0_r, tmp1_r, tmp2_r, tmp3_r, tmp4_r, tmp5_r, tmp6_r, tmp7_r;
    v4i32 tmp0_l, tmp1_l, tmp2_l, tmp3_l, tmp4_l, tmp5_l, tmp6_l, tmp7_l;
    v4i32 vec0_r, vec1_r, vec2_r, vec3_r, vec0_l, vec1_l, vec2_l, vec3_l;
    v4i32 res0_r, res1_r, res2_r, res3_r, res4_r, res5_r, res6_r, res7_r;
    v4i32 res0_l, res1_l, res2_l, res3_l, res4_l, res5_l, res6_l, res7_l;
    v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v16i8 zeros = { 0 };

    p_src[ 0 ] += 32;    /* rounding bias for the final >> 6 */

    LD_SH8( p_src, 8, src0, src1, src2, src3, src4, src5, src6, src7 );

    /* even half of the 1-D 8-point transform (row pass) */
    vec0 = src0 + src4;
    vec1 = src0 - src4;
    vec2 = src2 >> 1;
    vec2 = vec2 - src6;
    vec3 = src6 >> 1;
    vec3 = src2 + vec3;

    BUTTERFLY_4( vec0, vec1, vec2, vec3, tmp0, tmp1, tmp2, tmp3 );

    /* odd half */
    vec0 = src7 >> 1;
    vec0 = src5 - vec0 - src3 - src7;
    vec1 = src3 >> 1;
    vec1 = src1 - vec1 + src7 - src3;
    vec2 = src5 >> 1;
    vec2 = vec2 - src1 + src7 + src5;
    vec3 = src1 >> 1;
    vec3 = vec3 + src3 + src5 + src1;
    tmp4 = vec3 >> 2;
    tmp4 += vec0;
    tmp5 = vec2 >> 2;
    tmp5 += vec1;
    tmp6 = vec1 >> 2;
    tmp6 -= vec2;
    tmp7 = vec0 >> 2;
    tmp7 = vec3 - tmp7;

    /* combine even and odd halves, then transpose for the column pass */
    BUTTERFLY_8( tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
                 res0, res1, res2, res3, res4, res5, res6, res7 );
    TRANSPOSE8x8_SH_SH( res0, res1, res2, res3, res4, res5, res6, res7,
                        res0, res1, res2, res3, res4, res5, res6, res7 );
    /* widen to 32 bits: the column pass needs the extra precision */
    UNPCK_SH_SW( res0, tmp0_r, tmp0_l );
    UNPCK_SH_SW( res1, tmp1_r, tmp1_l );
    UNPCK_SH_SW( res2, tmp2_r, tmp2_l );
    UNPCK_SH_SW( res3, tmp3_r, tmp3_l );
    UNPCK_SH_SW( res4, tmp4_r, tmp4_l );
    UNPCK_SH_SW( res5, tmp5_r, tmp5_l );
    UNPCK_SH_SW( res6, tmp6_r, tmp6_l );
    UNPCK_SH_SW( res7, tmp7_r, tmp7_l );
    /* column pass, even half (low and high 4-element halves in parallel) */
    BUTTERFLY_4( tmp0_r, tmp0_l, tmp4_l, tmp4_r,
                 vec0_r, vec0_l, vec1_l, vec1_r );

    vec2_r = tmp2_r >> 1;
    vec2_l = tmp2_l >> 1;
    vec2_r -= tmp6_r;
    vec2_l -= tmp6_l;
    vec3_r = tmp6_r >> 1;
    vec3_l = tmp6_l >> 1;
    vec3_r += tmp2_r;
    vec3_l += tmp2_l;

    BUTTERFLY_4( vec0_r, vec1_r, vec2_r, vec3_r,
                 tmp0_r, tmp2_r, tmp4_r, tmp6_r );
    BUTTERFLY_4( vec0_l, vec1_l, vec2_l, vec3_l,
                 tmp0_l, tmp2_l, tmp4_l, tmp6_l );

    /* column pass, odd half */
    vec0_r = tmp7_r >> 1;
    vec0_l = tmp7_l >> 1;
    vec0_r = tmp5_r - vec0_r - tmp3_r - tmp7_r;
    vec0_l = tmp5_l - vec0_l - tmp3_l - tmp7_l;
    vec1_r = tmp3_r >> 1;
    vec1_l = tmp3_l >> 1;
    vec1_r = tmp1_r - vec1_r + tmp7_r - tmp3_r;
    vec1_l = tmp1_l - vec1_l + tmp7_l - tmp3_l;
    vec2_r = tmp5_r >> 1;
    vec2_l = tmp5_l >> 1;
    vec2_r = vec2_r - tmp1_r + tmp7_r + tmp5_r;
    vec2_l = vec2_l - tmp1_l + tmp7_l + tmp5_l;
    vec3_r = tmp1_r >> 1;
    vec3_l = tmp1_l >> 1;
    vec3_r = vec3_r + tmp3_r + tmp5_r + tmp1_r;
    vec3_l = vec3_l + tmp3_l + tmp5_l + tmp1_l;
    tmp1_r = vec3_r >> 2;
    tmp1_l = vec3_l >> 2;
    tmp1_r += vec0_r;
    tmp1_l += vec0_l;
    tmp3_r = vec2_r >> 2;
    tmp3_l = vec2_l >> 2;
    tmp3_r += vec1_r;
    tmp3_l += vec1_l;
    tmp5_r = vec1_r >> 2;
    tmp5_l = vec1_l >> 2;
    tmp5_r -= vec2_r;
    tmp5_l -= vec2_l;
    tmp7_r = vec0_r >> 2;
    tmp7_l = vec0_l >> 2;
    tmp7_r = vec3_r - tmp7_r;
    tmp7_l = vec3_l - tmp7_l;

    BUTTERFLY_4( tmp0_r, tmp0_l, tmp7_l, tmp7_r,
                 res0_r, res0_l, res7_l, res7_r );
    BUTTERFLY_4( tmp2_r, tmp2_l, tmp5_l, tmp5_r,
                 res1_r, res1_l, res6_l, res6_r );
    BUTTERFLY_4( tmp4_r, tmp4_l, tmp3_l, tmp3_r,
                 res2_r, res2_l, res5_l, res5_r );
    BUTTERFLY_4( tmp6_r, tmp6_l, tmp1_l, tmp1_r,
                 res3_r, res3_l, res4_l, res4_r );
    /* final >> 6 scaling */
    SRA_4V( res0_r, res0_l, res1_r, res1_l, 6 );
    SRA_4V( res2_r, res2_l, res3_r, res3_l, 6 );
    SRA_4V( res4_r, res4_l, res5_r, res5_l, 6 );
    SRA_4V( res6_r, res6_l, res7_r, res7_l, 6 );
    PCKEV_H4_SH( res0_l, res0_r, res1_l, res1_r, res2_l, res2_r, res3_l, res3_r,
                 res0, res1, res2, res3 );
    PCKEV_H4_SH( res4_l, res4_r, res5_l, res5_r, res6_l, res6_r, res7_l, res7_r,
                 res4, res5, res6, res7 );
    /* load the predictor, widen to 16 bits, add the residual, clip, store */
    LD_SB8( p_dst, i_dst_stride,
            dst0, dst1, dst2, dst3,
            dst4, dst5, dst6, dst7 );
    ILVR_B4_SH( zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3,
                tmp0, tmp1, tmp2, tmp3 );
    ILVR_B4_SH( zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7,
                tmp4, tmp5, tmp6, tmp7 );
    ADD4( res0, tmp0, res1, tmp1, res2, tmp2, res3, tmp3,
          res0, res1, res2, res3 );
    ADD4( res4, tmp4, res5, tmp5, res6, tmp6, res7, tmp7,
          res4, res5, res6, res7 );
    CLIP_SH4_0_255( res0, res1, res2, res3 );
    CLIP_SH4_0_255( res4, res5, res6, res7 );
    PCKEV_B4_SB( res1, res0, res3, res2, res5, res4, res7, res6,
                 dst0, dst1, dst2, dst3 );
    ST8x4_UB( dst0, dst1, p_dst, i_dst_stride );
    p_dst += ( 4 * i_dst_stride );
    ST8x4_UB( dst2, dst3, p_dst, i_dst_stride );
}
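
Both the row and column passes above run the same 1-D eight-point inverse transform; the vec/tmp registers implement its even/odd decomposition. A scalar sketch of one such pass (names illustrative; this is the standard H.264 8x8 inverse-transform stage, before the final >> 6, predictor add, and clip):

#include <stdint.h>

static void idct8_1d_ref( const int32_t s[8], int32_t out[8] )
{
    /* even half */
    int32_t a0 = s[0] + s[4],          a2 = s[0] - s[4];
    int32_t a4 = ( s[2] >> 1 ) - s[6], a6 = s[2] + ( s[6] >> 1 );
    int32_t b0 = a0 + a6, b2 = a2 + a4, b4 = a2 - a4, b6 = a0 - a6;
    /* odd half */
    int32_t a1 = s[5] - s[3] - s[7] - ( s[7] >> 1 );
    int32_t a3 = s[1] + s[7] - s[3] - ( s[3] >> 1 );
    int32_t a5 = s[7] - s[1] + s[5] + ( s[5] >> 1 );
    int32_t a7 = s[3] + s[5] + s[1] + ( s[1] >> 1 );
    int32_t b1 = a1 + ( a7 >> 2 ), b3 = a3 + ( a5 >> 2 );
    int32_t b5 = ( a3 >> 2 ) - a5, b7 = a7 - ( a1 >> 2 );
    /* final butterfly */
    out[0] = b0 + b7; out[7] = b0 - b7;
    out[1] = b2 + b5; out[6] = b2 - b5;
    out[2] = b4 + b3; out[5] = b4 - b3;
    out[3] = b6 + b1; out[4] = b6 - b1;
}
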
Example #5
File: quant-c.c Project: xing2fan/x264
static void avc_dequant_8x8_msa( int16_t *p_dct, int32_t pi_dequant_mf[6][64],
                                 int32_t i_qp )
{
    const int32_t i_mf = i_qp % 6;
    const int32_t q_bits = i_qp / 6 - 6;
    v8i16 dct0, dct1, dct2, dct3, dct4, dct5, dct6, dct7;
    v4i32 dequant_m_f0, dequant_m_f1, dequant_m_f2, dequant_m_f3;
    v4i32 dequant_m_f4, dequant_m_f5, dequant_m_f6, dequant_m_f7;
    v4i32 dequant_m_f8, dequant_m_f9, dequant_m_f10, dequant_m_f11;
    v4i32 dequant_m_f12, dequant_m_f13, dequant_m_f14, dequant_m_f15;

    LD_SH8( p_dct, 8, dct0, dct1, dct2, dct3, dct4, dct5, dct6, dct7 );

    LD_SW2( pi_dequant_mf[i_mf], 4, dequant_m_f0, dequant_m_f1 );
    LD_SW2( pi_dequant_mf[i_mf] + 8, 4, dequant_m_f2, dequant_m_f3 );
    LD_SW2( pi_dequant_mf[i_mf] + 16, 4, dequant_m_f4, dequant_m_f5 );
    LD_SW2( pi_dequant_mf[i_mf] + 24, 4, dequant_m_f6, dequant_m_f7 );
    LD_SW2( pi_dequant_mf[i_mf] + 32, 4, dequant_m_f8, dequant_m_f9 );
    LD_SW2( pi_dequant_mf[i_mf] + 40, 4, dequant_m_f10, dequant_m_f11 );
    LD_SW2( pi_dequant_mf[i_mf] + 48, 4, dequant_m_f12, dequant_m_f13 );
    LD_SW2( pi_dequant_mf[i_mf] + 56, 4, dequant_m_f14, dequant_m_f15 );

    /* q_bits >= 0: pack scale factors to 16 bits, multiply, shift left */
    if( q_bits >= 0 )
    {
        v8i16 q_bits_vec;
        v8i16 dequant_mf_h0, dequant_mf_h1, dequant_mf_h2, dequant_mf_h3;
        v8i16 dequant_mf_h4, dequant_mf_h5, dequant_mf_h6, dequant_mf_h7;

        q_bits_vec = __msa_fill_h( q_bits );

        PCKEV_H4_SH( dequant_m_f1, dequant_m_f0, dequant_m_f3, dequant_m_f2,
                     dequant_m_f5, dequant_m_f4, dequant_m_f7, dequant_m_f6,
                     dequant_mf_h0, dequant_mf_h1,
                     dequant_mf_h2, dequant_mf_h3 );
        PCKEV_H4_SH( dequant_m_f9, dequant_m_f8, dequant_m_f11, dequant_m_f10,
                     dequant_m_f13, dequant_m_f12, dequant_m_f15, dequant_m_f14,
                     dequant_mf_h4, dequant_mf_h5,
                     dequant_mf_h6, dequant_mf_h7 );

        dct0 *= dequant_mf_h0;
        dct1 *= dequant_mf_h1;
        dct2 *= dequant_mf_h2;
        dct3 *= dequant_mf_h3;
        dct4 *= dequant_mf_h4;
        dct5 *= dequant_mf_h5;
        dct6 *= dequant_mf_h6;
        dct7 *= dequant_mf_h7;

        SLLI_4V( dct0, dct1, dct2, dct3, q_bits_vec );
        SLLI_4V( dct4, dct5, dct6, dct7, q_bits_vec );

        ST_SH8( dct0, dct1, dct2, dct3, dct4, dct5, dct6, dct7, p_dct, 8 );
    }
    else
    {
        /* q_bits < 0: widen to 32 bits, multiply, round, shift right */
        const int32_t q_bits_add = 1 << ( -q_bits - 1 );
        v4i32 dct_signed_w0, dct_signed_w1, dct_signed_w2, dct_signed_w3;
        v4i32 dct_signed_w4, dct_signed_w5, dct_signed_w6, dct_signed_w7;
        v4i32 dct_signed_w8, dct_signed_w9, dct_signed_w10, dct_signed_w11;
        v4i32 dct_signed_w12, dct_signed_w13, dct_signed_w14, dct_signed_w15;
        v4i32 q_bits_vec, q_bits_vec_add;

        q_bits_vec_add = __msa_fill_w( q_bits_add );
        q_bits_vec = __msa_fill_w( -q_bits );

        UNPCK_SH_SW( dct0, dct_signed_w0, dct_signed_w1 );
        UNPCK_SH_SW( dct1, dct_signed_w2, dct_signed_w3 );
        UNPCK_SH_SW( dct2, dct_signed_w4, dct_signed_w5 );
        UNPCK_SH_SW( dct3, dct_signed_w6, dct_signed_w7 );
        UNPCK_SH_SW( dct4, dct_signed_w8, dct_signed_w9 );
        UNPCK_SH_SW( dct5, dct_signed_w10, dct_signed_w11 );
        UNPCK_SH_SW( dct6, dct_signed_w12, dct_signed_w13 );
        UNPCK_SH_SW( dct7, dct_signed_w14, dct_signed_w15 );

        dct_signed_w0 *= dequant_m_f0;
        dct_signed_w1 *= dequant_m_f1;
        dct_signed_w2 *= dequant_m_f2;
        dct_signed_w3 *= dequant_m_f3;
        dct_signed_w4 *= dequant_m_f4;
        dct_signed_w5 *= dequant_m_f5;
        dct_signed_w6 *= dequant_m_f6;
        dct_signed_w7 *= dequant_m_f7;
        dct_signed_w8 *= dequant_m_f8;
        dct_signed_w9 *= dequant_m_f9;
        dct_signed_w10 *= dequant_m_f10;
        dct_signed_w11 *= dequant_m_f11;
        dct_signed_w12 *= dequant_m_f12;
        dct_signed_w13 *= dequant_m_f13;
        dct_signed_w14 *= dequant_m_f14;
        dct_signed_w15 *= dequant_m_f15;

        dct_signed_w0 += q_bits_vec_add;
        dct_signed_w1 += q_bits_vec_add;
        dct_signed_w2 += q_bits_vec_add;
        dct_signed_w3 += q_bits_vec_add;
        dct_signed_w4 += q_bits_vec_add;
        dct_signed_w5 += q_bits_vec_add;
        dct_signed_w6 += q_bits_vec_add;
        dct_signed_w7 += q_bits_vec_add;
        dct_signed_w8 += q_bits_vec_add;
        dct_signed_w9 += q_bits_vec_add;
        dct_signed_w10 += q_bits_vec_add;
        dct_signed_w11 += q_bits_vec_add;
        dct_signed_w12 += q_bits_vec_add;
        dct_signed_w13 += q_bits_vec_add;
        dct_signed_w14 += q_bits_vec_add;
        dct_signed_w15 += q_bits_vec_add;

        SRA_4V( dct_signed_w0, dct_signed_w1, dct_signed_w2, dct_signed_w3,
                q_bits_vec );
        SRA_4V( dct_signed_w4, dct_signed_w5, dct_signed_w6, dct_signed_w7,
                q_bits_vec );
        SRA_4V( dct_signed_w8, dct_signed_w9, dct_signed_w10, dct_signed_w11,
                q_bits_vec );
        SRA_4V( dct_signed_w12, dct_signed_w13, dct_signed_w14, dct_signed_w15,
                q_bits_vec );
        PCKEV_H4_SH( dct_signed_w1, dct_signed_w0, dct_signed_w3, dct_signed_w2,
                     dct_signed_w5, dct_signed_w4, dct_signed_w7, dct_signed_w6,
                     dct0, dct1, dct2, dct3 );
        PCKEV_H4_SH( dct_signed_w9, dct_signed_w8, dct_signed_w11,
                     dct_signed_w10, dct_signed_w13, dct_signed_w12,
                     dct_signed_w15, dct_signed_w14, dct4, dct5, dct6, dct7 );
        ST_SH8( dct0, dct1, dct2, dct3, dct4, dct5, dct6, dct7, p_dct, 8 );
    }
}
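
A scalar sketch of the same dequantization rule (it mirrors x264's C dequant path: for q_bits >= 0 the scaled coefficient is shifted left, otherwise it is rounded and shifted right; function name illustrative):

#include <stdint.h>

static void dequant_8x8_ref( int16_t dct[64], int32_t dequant_mf[6][64],
                             int32_t i_qp )
{
    const int32_t i_mf = i_qp % 6;
    const int32_t q_bits = i_qp / 6 - 6;

    if( q_bits >= 0 )
        for( int i = 0; i < 64; i++ )
            dct[i] = (int16_t)( ( dct[i] * dequant_mf[i_mf][i] ) << q_bits );
    else
    {
        const int32_t f = 1 << ( -q_bits - 1 );
        for( int i = 0; i < 64; i++ )
            dct[i] = (int16_t)( ( dct[i] * dequant_mf[i_mf][i] + f ) >> -q_bits );
    }
}

The vector version branches on the same condition: for q_bits >= 0 it packs the scale factors down to 16 bits and multiplies eight coefficients per vector; for q_bits < 0 it widens to 32 bits so the rounding add and arithmetic shift stay exact.
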
Example #6
File: quant-c.c Project: xing2fan/x264
static int32_t avc_quant_8x8_msa( int16_t *p_dct, uint16_t *p_mf,
                                  uint16_t *p_bias )
{
    int32_t non_zero = 0;
    v8i16 dct0, dct1, dct2, dct3;
    v8i16 zero = { 0 };
    v8i16 dct0_mask, dct1_mask, dct2_mask, dct3_mask;
    v8i16 dct_h0, dct_h1, dct_h2, dct_h3, mf_h0, mf_h1, mf_h2, mf_h3;
    v8i16 bias_h0, bias_h1, bias_h2, bias_h3;
    v4i32 dct_w0, dct_w1, dct_w2, dct_w3, dct_w4, dct_w5, dct_w6, dct_w7;
    v4i32 dct_signed_w0, dct_signed_w1, dct_signed_w2, dct_signed_w3;
    v4i32 dct_signed_w4, dct_signed_w5, dct_signed_w6, dct_signed_w7;
    v4i32 mf_vec0, mf_vec1, mf_vec2, mf_vec3;
    v4i32 mf_vec4, mf_vec5, mf_vec6, mf_vec7;
    v4i32 bias0, bias1, bias2, bias3, bias4, bias5, bias6, bias7;

    LD_SH4( p_dct, 8, dct0, dct1, dct2, dct3 );

    /* record which coefficients are <= 0 so their sign can be restored later */
    dct0_mask = __msa_clei_s_h( dct0, 0 );
    dct1_mask = __msa_clei_s_h( dct1, 0 );
    dct2_mask = __msa_clei_s_h( dct2, 0 );
    dct3_mask = __msa_clei_s_h( dct3, 0 );

    UNPCK_SH_SW( dct0, dct_signed_w0, dct_signed_w1 );
    UNPCK_SH_SW( dct1, dct_signed_w2, dct_signed_w3 );
    UNPCK_SH_SW( dct2, dct_signed_w4, dct_signed_w5 );
    UNPCK_SH_SW( dct3, dct_signed_w6, dct_signed_w7 );
    LD_SH4( p_bias, 8, bias_h0, bias_h1, bias_h2, bias_h3 );
    ILVR_H4_SW( zero, bias_h0, zero, bias_h1, zero, bias_h2, zero, bias_h3,
                bias0, bias2, bias4, bias6 );
    ILVL_H4_SW( zero, bias_h0, zero, bias_h1, zero, bias_h2, zero, bias_h3,
                bias1, bias3, bias5, bias7 );
    LD_SH4( p_mf, 8, mf_h0, mf_h1, mf_h2, mf_h3 );
    ILVR_H4_SW( zero, mf_h0, zero, mf_h1, zero, mf_h2, zero, mf_h3,
                mf_vec0, mf_vec2, mf_vec4, mf_vec6 );
    ILVL_H4_SW( zero, mf_h0, zero, mf_h1, zero, mf_h2, zero, mf_h3,
                mf_vec1, mf_vec3, mf_vec5, mf_vec7 );

    /* |coef| + bias via absolute-value add */
    dct_w0 = __msa_add_a_w( dct_signed_w0, bias0 );
    dct_w1 = __msa_add_a_w( dct_signed_w1, bias1 );
    dct_w2 = __msa_add_a_w( dct_signed_w2, bias2 );
    dct_w3 = __msa_add_a_w( dct_signed_w3, bias3 );
    dct_w4 = __msa_add_a_w( dct_signed_w4, bias4 );
    dct_w5 = __msa_add_a_w( dct_signed_w5, bias5 );
    dct_w6 = __msa_add_a_w( dct_signed_w6, bias6 );
    dct_w7 = __msa_add_a_w( dct_signed_w7, bias7 );

    dct_w0 *= mf_vec0;
    dct_w1 *= mf_vec1;
    dct_w2 *= mf_vec2;
    dct_w3 *= mf_vec3;
    dct_w4 *= mf_vec4;
    dct_w5 *= mf_vec5;
    dct_w6 *= mf_vec6;
    dct_w7 *= mf_vec7;

    /* ( |coef| + bias ) * mf >> 16, then pack back to 16 bits */
    SRA_4V( dct_w0, dct_w1, dct_w2, dct_w3, 16 );
    SRA_4V( dct_w4, dct_w5, dct_w6, dct_w7, 16 );
    PCKEV_H4_SH( dct_w1, dct_w0, dct_w3, dct_w2, dct_w5, dct_w4, dct_w7, dct_w6,
                 dct_h0, dct_h1, dct_h2, dct_h3 );
    /* negated magnitudes, selected below for the lanes that were <= 0 */
    SUB4( zero, dct_h0, zero, dct_h1, zero, dct_h2, zero, dct_h3,
          dct0, dct1, dct2, dct3 );

    dct0 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h0,
                                   ( v16u8 ) dct0, ( v16u8 ) dct0_mask );
    dct1 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h1,
                                   ( v16u8 ) dct1, ( v16u8 ) dct1_mask );
    dct2 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h2,
                                   ( v16u8 ) dct2, ( v16u8 ) dct2_mask );
    dct3 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h3,
                                   ( v16u8 ) dct3, ( v16u8 ) dct3_mask );

    /* horizontal add of the magnitudes flags any nonzero coefficient */
    non_zero = HADD_SW_S32( ( v4u32 )( dct_h0 + dct_h1 + dct_h2 + dct_h3 ) );
    ST_SH4( dct0, dct1, dct2, dct3, p_dct, 8 );
    /* repeat for the second 32 coefficients of the 8x8 block */
    LD_SH4( p_dct + 32, 8, dct0, dct1, dct2, dct3 );

    dct0_mask = __msa_clei_s_h( dct0, 0 );
    dct1_mask = __msa_clei_s_h( dct1, 0 );
    dct2_mask = __msa_clei_s_h( dct2, 0 );
    dct3_mask = __msa_clei_s_h( dct3, 0 );

    UNPCK_SH_SW( dct0, dct_signed_w0, dct_signed_w1 );
    UNPCK_SH_SW( dct1, dct_signed_w2, dct_signed_w3 );
    UNPCK_SH_SW( dct2, dct_signed_w4, dct_signed_w5 );
    UNPCK_SH_SW( dct3, dct_signed_w6, dct_signed_w7 );
    LD_SH4( p_bias + 32, 8, bias_h0, bias_h1, bias_h2, bias_h3 );
    ILVR_H4_SW( zero, bias_h0, zero, bias_h1, zero, bias_h2, zero, bias_h3,
                bias0, bias2, bias4, bias6 );
    ILVL_H4_SW( zero, bias_h0, zero, bias_h1, zero, bias_h2, zero, bias_h3,
                bias1, bias3, bias5, bias7 );
    LD_SH4( p_mf + 32, 8, mf_h0, mf_h1, mf_h2, mf_h3 );
    ILVR_H4_SW( zero, mf_h0, zero, mf_h1, zero, mf_h2, zero, mf_h3,
                mf_vec0, mf_vec2, mf_vec4, mf_vec6 );
    ILVL_H4_SW( zero, mf_h0, zero, mf_h1, zero, mf_h2, zero, mf_h3,
                mf_vec1, mf_vec3, mf_vec5, mf_vec7 );

    dct_w0 = __msa_add_a_w( dct_signed_w0, bias0 );
    dct_w1 = __msa_add_a_w( dct_signed_w1, bias1 );
    dct_w2 = __msa_add_a_w( dct_signed_w2, bias2 );
    dct_w3 = __msa_add_a_w( dct_signed_w3, bias3 );
    dct_w4 = __msa_add_a_w( dct_signed_w4, bias4 );
    dct_w5 = __msa_add_a_w( dct_signed_w5, bias5 );
    dct_w6 = __msa_add_a_w( dct_signed_w6, bias6 );
    dct_w7 = __msa_add_a_w( dct_signed_w7, bias7 );

    dct_w0 *= mf_vec0;
    dct_w1 *= mf_vec1;
    dct_w2 *= mf_vec2;
    dct_w3 *= mf_vec3;
    dct_w4 *= mf_vec4;
    dct_w5 *= mf_vec5;
    dct_w6 *= mf_vec6;
    dct_w7 *= mf_vec7;

    SRA_4V( dct_w0, dct_w1, dct_w2, dct_w3, 16 );
    SRA_4V( dct_w4, dct_w5, dct_w6, dct_w7, 16 );
    PCKEV_H2_SH( dct_w1, dct_w0, dct_w3, dct_w2, dct_h0, dct_h1 );
    PCKEV_H2_SH( dct_w5, dct_w4, dct_w7, dct_w6, dct_h2, dct_h3 );
    SUB4( zero, dct_h0, zero, dct_h1, zero, dct_h2, zero, dct_h3,
          dct0, dct1, dct2, dct3 );

    dct0 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h0,
                                   ( v16u8 ) dct0, ( v16u8 ) dct0_mask );
    dct1 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h1,
                                   ( v16u8 ) dct1, ( v16u8 ) dct1_mask );
    dct2 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h2,
                                   ( v16u8 ) dct2, ( v16u8 ) dct2_mask );
    dct3 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h3,
                                   ( v16u8 ) dct3, ( v16u8 ) dct3_mask );

    non_zero += HADD_SW_S32( ( v4u32 ) ( dct_h0 + dct_h1 + dct_h2 + dct_h3 ) );
    ST_SH4( dct0, dct1, dct2, dct3, p_dct + 32, 8 );

    return !!non_zero;
}
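
A scalar sketch of the quantization rule the vector code implements, in the shape of x264's per-coefficient quant step (function name illustrative): take |coef| + bias, scale by mf, shift right by 16, restore the sign, and OR all results together for the nonzero flag.

#include <stdint.h>

static int32_t quant_8x8_ref( int16_t dct[64], uint16_t mf[64],
                              uint16_t bias[64] )
{
    int32_t nz = 0;
    for( int i = 0; i < 64; i++ )
    {
        if( dct[i] > 0 )
            dct[i] = (int16_t)( ( bias[i] + dct[i] ) * mf[i] >> 16 );
        else
            dct[i] = (int16_t)( -( ( bias[i] - dct[i] ) * mf[i] >> 16 ) );
        nz |= dct[i];
    }
    return !!nz;
}

In the MSA version, the __msa_clei_s_h masks capture which coefficients were <= 0, __msa_add_a_w forms |coef| + bias in one instruction, and __msa_bmnz_v swaps the negated magnitudes into those lanes before the store.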