Example #1
static int32_t avc_quant_4x4_msa( int16_t *p_dct, uint16_t *p_mf,
                                  uint16_t *p_bias )
{
    int32_t non_zero = 0;
    v8i16 dct0, dct1;
    v8i16 zero = { 0 };
    v8i16 dct0_mask, dct1_mask;
    v8i16 dct_h0, dct_h1, mf_h0, mf_h1, bias_h0, bias_h1;
    v4i32 dct_signed_w0, dct_signed_w1, dct_signed_w2, dct_signed_w3;
    v4i32 dct_w0, dct_w1, dct_w2, dct_w3;
    v4i32 mf_vec0, mf_vec1, mf_vec2, mf_vec3;
    v4i32 bias0, bias1, bias2, bias3;

    LD_SH2( p_dct, 8, dct0, dct1 );
    LD_SH2( p_bias, 8, bias_h0, bias_h1 );
    LD_SH2( p_mf, 8, mf_h0, mf_h1 );

    dct0_mask = __msa_clei_s_h( dct0, 0 );
    dct1_mask = __msa_clei_s_h( dct1, 0 );

    UNPCK_SH_SW( dct0, dct_signed_w0, dct_signed_w1 );
    UNPCK_SH_SW( dct1, dct_signed_w2, dct_signed_w3 );
    ILVR_H2_SW( zero, bias_h0, zero, bias_h1, bias0, bias2 );
    ILVL_H2_SW( zero, bias_h0, zero, bias_h1, bias1, bias3 );
    ILVR_H2_SW( zero, mf_h0, zero, mf_h1, mf_vec0, mf_vec2 );
    ILVL_H2_SW( zero, mf_h0, zero, mf_h1, mf_vec1, mf_vec3 );

    dct_w0 = __msa_add_a_w( dct_signed_w0, bias0 );
    dct_w1 = __msa_add_a_w( dct_signed_w1, bias1 );
    dct_w2 = __msa_add_a_w( dct_signed_w2, bias2 );
    dct_w3 = __msa_add_a_w( dct_signed_w3, bias3 );

    dct_w0 *= mf_vec0;
    dct_w1 *= mf_vec1;
    dct_w2 *= mf_vec2;
    dct_w3 *= mf_vec3;

    SRA_4V( dct_w0, dct_w1, dct_w2, dct_w3, 16 );
    PCKEV_H2_SH( dct_w1, dct_w0, dct_w3, dct_w2, dct_h0, dct_h1 );

    dct0 = zero - dct_h0;
    dct1 = zero - dct_h1;

    dct0 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h0, ( v16u8 ) dct0,
                                   ( v16u8 ) dct0_mask );
    dct1 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h1, ( v16u8 ) dct1,
                                   ( v16u8 ) dct1_mask );
    non_zero = HADD_SW_S32( ( v4u32 ) ( dct_h0 + dct_h1 ) );
    ST_SH2( dct0, dct1, p_dct, 8 );

    return !!non_zero;
}
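
A minimal scalar sketch of the rule the vector routine above implements (illustrative names, not x264's C path): bias the coefficient's magnitude, scale by the quantisation multiplier with a 16-bit downshift, then restore the sign, which is what the clei_s_h masks and bmnz_v selects do. It assumes the ( |coef| + bias ) * mf product fits in 32 bits, as the vector code also does.

#include <stdint.h>

static int32_t quant_4x4_scalar( int16_t *p_dct, const uint16_t *p_mf,
                                 const uint16_t *p_bias )
{
    int32_t nz = 0;
    for( int i = 0; i < 16; i++ )
    {
        int32_t coef = p_dct[i];
        int32_t mag = coef >= 0 ? coef : -coef;              /* __msa_add_a_w */
        int32_t level = ( ( mag + p_bias[i] ) * p_mf[i] ) >> 16;
        p_dct[i] = ( int16_t )( coef > 0 ? level : -level ); /* sign restore */
        nz |= level;
    }
    return !!nz;
}
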
Example #2
static int32_t avc_quant_4x4_dc_msa( int16_t *p_dct, int32_t i_mf,
                                     int32_t i_bias )
{
    int32_t non_zero = 0;
    v8i16 dct0, dct1, dct0_mask, dct1_mask;
    v8i16 zero = { 0 };
    v8i16 dct_h0, dct_h1;
    v4i32 dct_signed_w0, dct_signed_w1, dct_signed_w2, dct_signed_w3;
    v4i32 dct_w0, dct_w1, dct_w2, dct_w3;
    v4i32 mf_vec, bias_vec;

    LD_SH2( p_dct, 8, dct0, dct1 );

    dct0_mask = __msa_clei_s_h( dct0, 0 );
    dct1_mask = __msa_clei_s_h( dct1, 0 );

    UNPCK_SH_SW( dct0, dct_signed_w0, dct_signed_w1 );
    UNPCK_SH_SW( dct1, dct_signed_w2, dct_signed_w3 );

    bias_vec = __msa_fill_w( i_bias );
    mf_vec = __msa_fill_w( i_mf );

    dct_w0 = __msa_add_a_w( dct_signed_w0, bias_vec );
    dct_w1 = __msa_add_a_w( dct_signed_w1, bias_vec );
    dct_w2 = __msa_add_a_w( dct_signed_w2, bias_vec );
    dct_w3 = __msa_add_a_w( dct_signed_w3, bias_vec );

    dct_w0 *= mf_vec;
    dct_w1 *= mf_vec;
    dct_w2 *= mf_vec;
    dct_w3 *= mf_vec;

    SRA_4V( dct_w0, dct_w1, dct_w2, dct_w3, 16 );
    PCKEV_H2_SH( dct_w1, dct_w0, dct_w3, dct_w2, dct_h0, dct_h1 );

    dct0 = zero - dct_h0;
    dct1 = zero - dct_h1;
    dct0 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h0,
                                   ( v16u8 ) dct0, ( v16u8 ) dct0_mask );
    dct1 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h1,
                                   ( v16u8 ) dct1, ( v16u8 ) dct1_mask );
    non_zero = HADD_SW_S32( ( v4u32 ) ( dct_h0 + dct_h1 ) );

    ST_SH2( dct0, dct1, p_dct, 8 );

    return !!non_zero;
}
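
The DC variant applies one mf/bias pair to every coefficient, which is exactly what the two __msa_fill_w broadcasts express; a scalar counterpart (again illustrative) just swaps the table lookups for the two scalars.

#include <stdint.h>

static int32_t quant_4x4_dc_scalar( int16_t *p_dct, int32_t i_mf,
                                    int32_t i_bias )
{
    int32_t nz = 0;
    for( int i = 0; i < 16; i++ )
    {
        int32_t coef = p_dct[i];
        int32_t mag = coef >= 0 ? coef : -coef;
        int32_t level = ( ( mag + i_bias ) * i_mf ) >> 16;
        p_dct[i] = ( int16_t )( coef > 0 ? level : -level );
        nz |= level;
    }
    return !!nz;
}
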
Example #3
void aom_plane_add_noise_msa(uint8_t *start_ptr, char *noise,
                             char blackclamp[16], char whiteclamp[16],
                             char bothclamp[16], uint32_t width,
                             uint32_t height, int32_t pitch) {
  uint32_t i, j;

  for (i = 0; i < height / 2; ++i) {
    uint8_t *pos0_ptr = start_ptr + (2 * i) * pitch;
    int8_t *ref0_ptr = (int8_t *)(noise + (rand() & 0xff));
    uint8_t *pos1_ptr = start_ptr + (2 * i + 1) * pitch;
    int8_t *ref1_ptr = (int8_t *)(noise + (rand() & 0xff));
    for (j = width / 16; j--;) {
      v16i8 temp00_s, temp01_s;
      v16u8 temp00, temp01, black_clamp, white_clamp;
      v16u8 pos0, ref0, pos1, ref1;
      v16i8 const127 = __msa_ldi_b(127);

      pos0 = LD_UB(pos0_ptr);
      ref0 = LD_UB(ref0_ptr);
      pos1 = LD_UB(pos1_ptr);
      ref1 = LD_UB(ref1_ptr);
      black_clamp = (v16u8)__msa_fill_b(blackclamp[0]);
      white_clamp = (v16u8)__msa_fill_b(whiteclamp[0]);
      temp00 = (pos0 < black_clamp);
      pos0 = __msa_bmnz_v(pos0, black_clamp, temp00);
      temp01 = (pos1 < black_clamp);
      pos1 = __msa_bmnz_v(pos1, black_clamp, temp01);
      XORI_B2_128_UB(pos0, pos1);
      temp00_s = __msa_adds_s_b((v16i8)white_clamp, const127);
      temp00 = (v16u8)(temp00_s < (v16i8)pos0);
      pos0 = (v16u8)__msa_bmnz_v((v16u8)pos0, (v16u8)temp00_s, temp00);
      temp01_s = __msa_adds_s_b((v16i8)white_clamp, const127);
      temp01 = (v16u8)(temp01_s < (v16i8)pos1);
      pos1 = (v16u8)__msa_bmnz_v((v16u8)pos1, (v16u8)temp01_s, temp01);
      XORI_B2_128_UB(pos0, pos1);
      pos0 += ref0;
      ST_UB(pos0, pos0_ptr);
      pos1 += ref1;
      ST_UB(pos1, pos1_ptr);
      pos0_ptr += 16;
      pos1_ptr += 16;
      ref0_ptr += 16;
      ref1_ptr += 16;
    }
  }
}
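
A scalar walk-through of one pixel of the inner loop (hypothetical helper; upstream's C fallback is organised differently): clamp from below in the unsigned domain, clamp from above in the signed domain that the XOR-with-128 trick establishes, then add the noise byte with ordinary 8-bit wraparound.

#include <stdint.h>

static uint8_t add_noise_pixel(uint8_t pix, int8_t black, int8_t white,
                               int8_t noise) {
  int s, hi;

  if (pix < (uint8_t)black) pix = (uint8_t)black; /* lower clamp (bmnz_v) */

  s = (int)pix - 128;      /* XORI_B2_128_UB: into the signed domain */
  hi = (int)white + 127;   /* __msa_adds_s_b(white_clamp, const127) */
  if (hi > 127) hi = 127;  /* saturating add */
  if (s > hi) s = hi;      /* upper clamp via the compare mask + bmnz_v */

  pix = (uint8_t)(s + 128); /* back to the unsigned domain */
  return (uint8_t)(pix + noise);
}
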
Example #4
void vpx_lpf_horizontal_8_dual_msa(uint8_t *src, int32_t pitch,
                                   const uint8_t *b_limit0,
                                   const uint8_t *limit0,
                                   const uint8_t *thresh0,
                                   const uint8_t *b_limit1,
                                   const uint8_t *limit1,
                                   const uint8_t *thresh1) {
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
  v16u8 flat, mask, hev, tmp, thresh, b_limit, limit;
  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
  v8u16 p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
  v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l;
  v16u8 zero = { 0 };

  /* load vector elements */
  LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);

  thresh = (v16u8)__msa_fill_b(*thresh0);
  tmp = (v16u8)__msa_fill_b(*thresh1);
  thresh = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)thresh);

  b_limit = (v16u8)__msa_fill_b(*b_limit0);
  tmp = (v16u8)__msa_fill_b(*b_limit1);
  b_limit = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)b_limit);

  limit = (v16u8)__msa_fill_b(*limit0);
  tmp = (v16u8)__msa_fill_b(*limit1);
  limit = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)limit);

  /* mask and hev */
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask, flat);
  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);

  if (__msa_test_bz_v(flat)) {
    ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
  } else {
    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r,
               q2_r, q3_r);
    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);

    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
    ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
    VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);

    /* convert 16 bit output data into 8 bit */
    PCKEV_B4_SH(p2_filt8_l, p2_filt8_r, p1_filt8_l, p1_filt8_r, p0_filt8_l,
                p0_filt8_r, q0_filt8_l, q0_filt8_r, p2_filt8_r, p1_filt8_r,
                p0_filt8_r, q0_filt8_r);
    PCKEV_B2_SH(q1_filt8_l, q1_filt8_r, q2_filt8_l, q2_filt8_r, q1_filt8_r,
                q2_filt8_r);

    /* store pixel values */
    p2_out = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
    p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
    p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
    q0_out = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
    q1_out = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
    q2_out = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);

    src -= 3 * pitch;

    ST_UB4(p2_out, p1_out, p0_out, q0_out, src, pitch);
    src += (4 * pitch);
    ST_UB2(q1_out, q2_out, src, pitch);
    src += (2 * pitch);
  }
}
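
The blends at the end of the flat branch are per-byte bit selects. Modelling the documented __msa_bmnz_v semantics in scalar code (helper name ours): destination bits survive where the mask is 0 and source bits win where it is 1, so the all-0x00/0xFF lanes produced by VP9_FLAT4 pick the filter8 result on flat pixels and keep the filter4 output elsewhere.

#include <stdint.h>

static uint8_t bmnz_byte(uint8_t dst, uint8_t src, uint8_t mask) {
  return (uint8_t)((dst & ~mask) | (src & mask)); /* "bit move if not zero" */
}
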
Example #5
void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
                                 const uint8_t *b_limit0,
                                 const uint8_t *limit0,
                                 const uint8_t *thresh0,
                                 const uint8_t *b_limit1,
                                 const uint8_t *limit1,
                                 const uint8_t *thresh1) {
  uint8_t *temp_src;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 p1_out, p0_out, q0_out, q1_out;
  v16u8 flat, mask, hev, thresh, b_limit, limit;
  v16u8 row4, row5, row6, row7, row12, row13, row14, row15;
  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
  v8u16 p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
  v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l;
  v16u8 zero = { 0 };
  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;

  temp_src = src - 4;

  LD_UB8(temp_src, pitch, p0, p1, p2, p3, row4, row5, row6, row7);
  temp_src += (8 * pitch);
  LD_UB8(temp_src, pitch, q3, q2, q1, q0, row12, row13, row14, row15);

  /* transpose 16x8 matrix into 8x16 */
  TRANSPOSE16x8_UB_UB(p0, p1, p2, p3, row4, row5, row6, row7,
                      q3, q2, q1, q0, row12, row13, row14, row15,
                      p3, p2, p1, p0, q0, q1, q2, q3);

  thresh = (v16u8)__msa_fill_b(*thresh0);
  vec0 = (v8i16)__msa_fill_b(*thresh1);
  thresh = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)thresh);

  b_limit = (v16u8)__msa_fill_b(*b_limit0);
  vec0 = (v8i16)__msa_fill_b(*b_limit1);
  b_limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)b_limit);

  limit = (v16u8)__msa_fill_b(*limit0);
  vec0 = (v8i16)__msa_fill_b(*limit1);
  limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)limit);

  /* mask and hev */
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask, flat);
  /* flat4 */
  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
  /* filter4 */
  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);

  if (__msa_test_bz_v(flat)) {
    ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec2, vec3);
    ILVL_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec4, vec5);

    src -= 2;
    ST4x8_UB(vec2, vec3, src, pitch);
    src += 8 * pitch;
    ST4x8_UB(vec4, vec5, src, pitch);
  } else {
    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
               q3_r);
    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);

    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
    ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);

    /* filter8 */
    VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);

    /* convert 16 bit output data into 8 bit */
    PCKEV_B4_SH(p2_filt8_l, p2_filt8_r, p1_filt8_l, p1_filt8_r, p0_filt8_l,
                p0_filt8_r, q0_filt8_l, q0_filt8_r, p2_filt8_r, p1_filt8_r,
                p0_filt8_r, q0_filt8_r);
    PCKEV_B2_SH(q1_filt8_l, q1_filt8_r, q2_filt8_l, q2_filt8_r, q1_filt8_r,
                q2_filt8_r);

    /* store pixel values */
    p2 = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
    p1 = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
    p0 = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
    q0 = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
    q1 = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
    q2 = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);

    ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec3, vec4);
    ILVL_B2_SH(p1, p2, q0, p0, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec6, vec7);
    ILVRL_B2_SH(q2, q1, vec2, vec5);

    src -= 3;
    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec2, 0, src + 4, pitch);
    src += (4 * pitch);
    ST4x4_UB(vec4, vec4, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec2, 4, src + 4, pitch);
    src += (4 * pitch);
    ST4x4_UB(vec6, vec6, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec5, 0, src + 4, pitch);
    src += (4 * pitch);
    ST4x4_UB(vec7, vec7, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec5, 4, src + 4, pitch);
  }
}
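
Both dual variants filter two adjacent 8-pixel edges in a single 16-lane pass, which is why each pair of scalar thresholds is broadcast with fill_b and then merged 64:64 with ilvr_d. A sketch of the lane layout this produces (illustrative helper):

#include <stdint.h>

static void pack_dual_threshold(uint8_t out[16], uint8_t t0, uint8_t t1) {
  int i;

  for (i = 0; i < 8; i++) {
    out[i] = t0;     /* low doubleword: thresholds for the first edge */
    out[i + 8] = t1; /* high doubleword: thresholds for the second edge */
  }
}
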
Example #6
void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
                            const uint8_t *b_limit_ptr,
                            const uint8_t *limit_ptr,
                            const uint8_t *thresh_ptr) {
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 p1_out, p0_out, q0_out, q1_out;
  v16u8 flat, mask, hev, thresh, b_limit, limit;
  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
  v16u8 zero = { 0 };
  v8i16 vec0, vec1, vec2, vec3, vec4;

  /* load vector elements */
  LD_UB8(src - 4, pitch, p3, p2, p1, p0, q0, q1, q2, q3);

  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3,
                     p3, p2, p1, p0, q0, q1, q2, q3);

  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
  limit = (v16u8)__msa_fill_b(*limit_ptr);

  /* mask and hev */
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask, flat);
  /* flat4 */
  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
  /* filter4 */
  VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);

  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);

  if (__msa_test_bz_v(flat)) {
    /* Store 4 pixels p1 to q1 */
    ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec2, vec3);

    src -= 2;
    ST4x4_UB(vec2, vec2, 0, 1, 2, 3, src, pitch);
    src += 4 * pitch;
    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
  } else {
    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
               q3_r);
    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
    /* convert 16 bit output data into 8 bit */
    PCKEV_B4_SH(p2_filt8_r, p2_filt8_r, p1_filt8_r, p1_filt8_r, p0_filt8_r,
                p0_filt8_r, q0_filt8_r, q0_filt8_r, p2_filt8_r, p1_filt8_r,
                p0_filt8_r, q0_filt8_r);
    PCKEV_B2_SH(q1_filt8_r, q1_filt8_r, q2_filt8_r, q2_filt8_r, q1_filt8_r,
                q2_filt8_r);

    /* store pixel values */
    p2 = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
    p1 = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
    p0 = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
    q0 = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
    q1 = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
    q2 = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);

    /* Store 6 pixels p2 to q2 */
    ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec2, vec3);
    vec4 = (v8i16)__msa_ilvr_b((v16i8)q2, (v16i8)q1);

    src -= 3;
    ST4x4_UB(vec2, vec2, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec4, 0, src + 4, pitch);
    src += (4 * pitch);
    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec4, 4, src + 4, pitch);
  }
}
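
In the vertical case each filtered pixel sits in a different vector lane, one per image row, so the ILVR/ILVRL interleaves followed by ST4x4_UB/ST2x4_UB scatter the results back across the edge. A scalar view of the 6-pixel store in the flat branch, with hypothetical per-row arrays standing in for the interleaved vectors:

#include <stdint.h>

static void store_vertical_6(uint8_t *src, int32_t pitch,
                             const uint8_t p2c[8], const uint8_t p1c[8],
                             const uint8_t p0c[8], const uint8_t q0c[8],
                             const uint8_t q1c[8], const uint8_t q2c[8]) {
  int row;

  for (row = 0; row < 8; row++) {
    uint8_t *px = src - 3 + row * pitch; /* 3 pixels left of the edge */
    px[0] = p2c[row];
    px[1] = p1c[row];
    px[2] = p0c[row];
    px[3] = q0c[row];
    px[4] = q1c[row];
    px[5] = q2c[row];
  }
}
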
Example #7
void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
                              const uint8_t *b_limit_ptr,
                              const uint8_t *limit_ptr,
                              const uint8_t *thresh_ptr) {
  uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
  v16u8 mask, hev, flat, thresh, b_limit, limit;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
  v8i16 p2_filter8, p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8;
  v8u16 p3_r, p2_r, p1_r, p0_r, q3_r, q2_r, q1_r, q0_r;
  v16i8 zero = { 0 };

  /* load vector elements */
  LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);

  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
  limit = (v16u8)__msa_fill_b(*limit_ptr);

  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask, flat);
  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
  VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);

  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);

  if (__msa_test_bz_v(flat)) {
    p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
    p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
    q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
    q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
    SD4(p1_d, p0_d, q0_d, q1_d, (src - 2 * pitch), pitch);
  } else {
    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r,
               q2_r, q3_r);
    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
                p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);

    /* convert 16 bit output data into 8 bit */
    PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8,
                zero, q0_filter8, p2_filter8, p1_filter8, p0_filter8,
                q0_filter8);
    PCKEV_B2_SH(zero, q1_filter8, zero, q2_filter8, q1_filter8, q2_filter8);

    /* store pixel values */
    p2_out = __msa_bmnz_v(p2, (v16u8)p2_filter8, flat);
    p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filter8, flat);
    p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filter8, flat);
    q0_out = __msa_bmnz_v(q0_out, (v16u8)q0_filter8, flat);
    q1_out = __msa_bmnz_v(q1_out, (v16u8)q1_filter8, flat);
    q2_out = __msa_bmnz_v(q2, (v16u8)q2_filter8, flat);

    p2_d = __msa_copy_u_d((v2i64)p2_out, 0);
    p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
    p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
    q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
    q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
    q2_d = __msa_copy_u_d((v2i64)q2_out, 0);

    src -= 3 * pitch;

    SD4(p2_d, p1_d, p0_d, q0_d, src, pitch);
    src += (4 * pitch);
    SD(q1_d, src);
    src += pitch;
    SD(q2_d, src);
  }
}
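
The non-dual horizontal variant produces only 8 valid lanes, which is also why the upper doubleword of flat is zeroed before the branch test. Results leave the vector registers through __msa_copy_u_d plus 8-byte SD stores; a scalar model of that store (helper name ours, little-endian lane order assumed):

#include <stdint.h>
#include <string.h>

static void store_low_doubleword(uint8_t *dst, const uint8_t lanes[16]) {
  memcpy(dst, lanes, 8); /* low 64 bits = 8 filtered pixels; upper lanes unused */
}
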
Example #8
int32_t vp8_mbblock_error_msa(MACROBLOCK *mb, int32_t dc)
{
    BLOCK *be;
    BLOCKD *bd;
    int16_t *coeff_ptr, *dq_coeff_ptr;
    int32_t err = 0;
    uint32_t loop_cnt;
    v8i16 coeff, coeff0, coeff1, coeff2, coeff3, coeff4;
    v8i16 dq_coeff, dq_coeff2, dq_coeff3, dq_coeff4;
    v4i32 diff0, diff1;
    v2i64 err0, err1;
    v16u8 zero  = { 0 };
    v16u8 mask0 = (v16u8)__msa_ldi_b(255);

    if (1 == dc)
    {
        mask0 = (v16u8)__msa_insve_w((v4i32)mask0, 0, (v4i32)zero);
    }

    for (loop_cnt = 0; loop_cnt < 8; loop_cnt++)
    {
        be = &mb->block[2 * loop_cnt];
        bd = &mb->e_mbd.block[2 * loop_cnt];
        coeff_ptr = be->coeff;
        dq_coeff_ptr = bd->dqcoeff;
        coeff = LD_SH(coeff_ptr);
        dq_coeff = LD_SH(dq_coeff_ptr);
        coeff_ptr += 8;
        dq_coeff_ptr += 8;
        coeff2 = LD_SH(coeff_ptr);
        dq_coeff2 = LD_SH(dq_coeff_ptr);
        be = &mb->block[2 * loop_cnt + 1];
        bd = &mb->e_mbd.block[2 * loop_cnt + 1];
        coeff_ptr = be->coeff;
        dq_coeff_ptr = bd->dqcoeff;
        coeff3 = LD_SH(coeff_ptr);
        dq_coeff3 = LD_SH(dq_coeff_ptr);
        coeff_ptr += 8;
        dq_coeff_ptr += 8;
        coeff4 = LD_SH(coeff_ptr);
        dq_coeff4 = LD_SH(dq_coeff_ptr);
        ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1);
        HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
        diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0);
        DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);
        ILVRL_H2_SH(coeff2, dq_coeff2, coeff0, coeff1);
        HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
        DPADD_SD2_SD(diff0, diff1, err0, err1);
        err0 += __msa_splati_d(err0, 1);
        err1 += __msa_splati_d(err1, 1);
        err += __msa_copy_s_d(err0, 0);
        err += __msa_copy_s_d(err1, 0);

        ILVRL_H2_SH(coeff3, dq_coeff3, coeff0, coeff1);
        HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
        diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0);
        DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);
        ILVRL_H2_SH(coeff4, dq_coeff4, coeff0, coeff1);
        HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
        DPADD_SD2_SD(diff0, diff1, err0, err1);
        err0 += __msa_splati_d(err0, 1);
        err1 += __msa_splati_d(err1, 1);
        err += __msa_copy_s_d(err0, 0);
        err += __msa_copy_s_d(err1, 0);
    }

    return err;
}
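
The scalar shape of this computation, close to vp8's C fallback but written here with plain arrays instead of MACROBLOCK: sum the squared coeff minus dqcoeff differences over 16 blocks of 16 coefficients, starting at index 1 when dc is 1, which is the DC exclusion that mask0's zeroed first word performs on the vector side.

#include <stdint.h>

static int32_t mbblock_error_scalar(const int16_t coeff[16][16],
                                    const int16_t dqcoeff[16][16],
                                    int32_t dc)
{
    int32_t err = 0;
    uint32_t i, j;

    for (i = 0; i < 16; i++)
    {
        for (j = (1 == dc) ? 1 : 0; j < 16; j++)
        {
            int32_t diff = coeff[i][j] - dqcoeff[i][j];
            err += diff * diff;
        }
    }

    return err;
}
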
Example #9
static int32_t avc_quant_8x8_msa( int16_t *p_dct, uint16_t *p_mf,
                                  uint16_t *p_bias )
{
    int32_t non_zero = 0;
    v8i16 dct0, dct1, dct2, dct3;
    v8i16 zero = { 0 };
    v8i16 dct0_mask, dct1_mask, dct2_mask, dct3_mask;
    v8i16 dct_h0, dct_h1, dct_h2, dct_h3, mf_h0, mf_h1, mf_h2, mf_h3;
    v8i16 bias_h0, bias_h1, bias_h2, bias_h3;
    v4i32 dct_w0, dct_w1, dct_w2, dct_w3, dct_w4, dct_w5, dct_w6, dct_w7;
    v4i32 dct_signed_w0, dct_signed_w1, dct_signed_w2, dct_signed_w3;
    v4i32 dct_signed_w4, dct_signed_w5, dct_signed_w6, dct_signed_w7;
    v4i32 mf_vec0, mf_vec1, mf_vec2, mf_vec3;
    v4i32 mf_vec4, mf_vec5, mf_vec6, mf_vec7;
    v4i32 bias0, bias1, bias2, bias3, bias4, bias5, bias6, bias7;

    LD_SH4( p_dct, 8, dct0, dct1, dct2, dct3 );

    dct0_mask = __msa_clei_s_h( dct0, 0 );
    dct1_mask = __msa_clei_s_h( dct1, 0 );
    dct2_mask = __msa_clei_s_h( dct2, 0 );
    dct3_mask = __msa_clei_s_h( dct3, 0 );

    UNPCK_SH_SW( dct0, dct_signed_w0, dct_signed_w1 );
    UNPCK_SH_SW( dct1, dct_signed_w2, dct_signed_w3 );
    UNPCK_SH_SW( dct2, dct_signed_w4, dct_signed_w5 );
    UNPCK_SH_SW( dct3, dct_signed_w6, dct_signed_w7 );
    LD_SH4( p_bias, 8, bias_h0, bias_h1, bias_h2, bias_h3 );
    ILVR_H4_SW( zero, bias_h0, zero, bias_h1, zero, bias_h2, zero, bias_h3,
                bias0, bias2, bias4, bias6 );
    ILVL_H4_SW( zero, bias_h0, zero, bias_h1, zero, bias_h2, zero, bias_h3,
                bias1, bias3, bias5, bias7 );
    LD_SH4( p_mf, 8, mf_h0, mf_h1, mf_h2, mf_h3 );
    ILVR_H4_SW( zero, mf_h0, zero, mf_h1, zero, mf_h2, zero, mf_h3,
                mf_vec0, mf_vec2, mf_vec4, mf_vec6 );
    ILVL_H4_SW( zero, mf_h0, zero, mf_h1, zero, mf_h2, zero, mf_h3,
                mf_vec1, mf_vec3, mf_vec5, mf_vec7 );

    dct_w0 = __msa_add_a_w( dct_signed_w0, bias0 );
    dct_w1 = __msa_add_a_w( dct_signed_w1, bias1 );
    dct_w2 = __msa_add_a_w( dct_signed_w2, bias2 );
    dct_w3 = __msa_add_a_w( dct_signed_w3, bias3 );
    dct_w4 = __msa_add_a_w( dct_signed_w4, bias4 );
    dct_w5 = __msa_add_a_w( dct_signed_w5, bias5 );
    dct_w6 = __msa_add_a_w( dct_signed_w6, bias6 );
    dct_w7 = __msa_add_a_w( dct_signed_w7, bias7 );

    dct_w0 *= mf_vec0;
    dct_w1 *= mf_vec1;
    dct_w2 *= mf_vec2;
    dct_w3 *= mf_vec3;
    dct_w4 *= mf_vec4;
    dct_w5 *= mf_vec5;
    dct_w6 *= mf_vec6;
    dct_w7 *= mf_vec7;

    SRA_4V( dct_w0, dct_w1, dct_w2, dct_w3, 16 );
    SRA_4V( dct_w4, dct_w5, dct_w6, dct_w7, 16 );
    PCKEV_H4_SH( dct_w1, dct_w0, dct_w3, dct_w2, dct_w5, dct_w4, dct_w7, dct_w6,
                 dct_h0, dct_h1, dct_h2, dct_h3 );
    SUB4( zero, dct_h0, zero, dct_h1, zero, dct_h2, zero, dct_h3,
          dct0, dct1, dct2, dct3 );

    dct0 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h0,
                                   ( v16u8 ) dct0, ( v16u8 ) dct0_mask );
    dct1 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h1,
                                   ( v16u8 ) dct1, ( v16u8 ) dct1_mask );
    dct2 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h2,
                                   ( v16u8 ) dct2, ( v16u8 ) dct2_mask );
    dct3 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h3,
                                   ( v16u8 ) dct3, ( v16u8 ) dct3_mask );

    non_zero = HADD_SW_S32( ( v4u32 )( dct_h0 + dct_h1 + dct_h2 + dct_h3 ) );
    ST_SH4( dct0, dct1, dct2, dct3, p_dct, 8 );
    LD_SH4( p_dct + 32, 8, dct0, dct1, dct2, dct3 );

    dct0_mask = __msa_clei_s_h( dct0, 0 );
    dct1_mask = __msa_clei_s_h( dct1, 0 );
    dct2_mask = __msa_clei_s_h( dct2, 0 );
    dct3_mask = __msa_clei_s_h( dct3, 0 );

    UNPCK_SH_SW( dct0, dct_signed_w0, dct_signed_w1 );
    UNPCK_SH_SW( dct1, dct_signed_w2, dct_signed_w3 );
    UNPCK_SH_SW( dct2, dct_signed_w4, dct_signed_w5 );
    UNPCK_SH_SW( dct3, dct_signed_w6, dct_signed_w7 );
    LD_SH4( p_bias + 32, 8, bias_h0, bias_h1, bias_h2, bias_h3 );
    ILVR_H4_SW( zero, bias_h0, zero, bias_h1, zero, bias_h2, zero, bias_h3,
                bias0, bias2, bias4, bias6 );
    ILVL_H4_SW( zero, bias_h0, zero, bias_h1, zero, bias_h2, zero, bias_h3,
                bias1, bias3, bias5, bias7 );
    LD_SH4( p_mf + 32, 8, mf_h0, mf_h1, mf_h2, mf_h3 );
    ILVR_H4_SW( zero, mf_h0, zero, mf_h1, zero, mf_h2, zero, mf_h3,
                mf_vec0, mf_vec2, mf_vec4, mf_vec6 );
    ILVL_H4_SW( zero, mf_h0, zero, mf_h1, zero, mf_h2, zero, mf_h3,
                mf_vec1, mf_vec3, mf_vec5, mf_vec7 );

    dct_w0 = __msa_add_a_w( dct_signed_w0, bias0 );
    dct_w1 = __msa_add_a_w( dct_signed_w1, bias1 );
    dct_w2 = __msa_add_a_w( dct_signed_w2, bias2 );
    dct_w3 = __msa_add_a_w( dct_signed_w3, bias3 );
    dct_w4 = __msa_add_a_w( dct_signed_w4, bias4 );
    dct_w5 = __msa_add_a_w( dct_signed_w5, bias5 );
    dct_w6 = __msa_add_a_w( dct_signed_w6, bias6 );
    dct_w7 = __msa_add_a_w( dct_signed_w7, bias7 );

    dct_w0 *= mf_vec0;
    dct_w1 *= mf_vec1;
    dct_w2 *= mf_vec2;
    dct_w3 *= mf_vec3;
    dct_w4 *= mf_vec4;
    dct_w5 *= mf_vec5;
    dct_w6 *= mf_vec6;
    dct_w7 *= mf_vec7;

    SRA_4V( dct_w0, dct_w1, dct_w2, dct_w3, 16 );
    SRA_4V( dct_w4, dct_w5, dct_w6, dct_w7, 16 );
    PCKEV_H2_SH( dct_w1, dct_w0, dct_w3, dct_w2, dct_h0, dct_h1 );
    PCKEV_H2_SH( dct_w5, dct_w4, dct_w7, dct_w6, dct_h2, dct_h3 );
    SUB4( zero, dct_h0, zero, dct_h1, zero, dct_h2, zero, dct_h3,
          dct0, dct1, dct2, dct3 );

    dct0 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h0,
                                   ( v16u8 ) dct0, ( v16u8 ) dct0_mask );
    dct1 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h1,
                                   ( v16u8 ) dct1, ( v16u8 ) dct1_mask );
    dct2 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h2,
                                   ( v16u8 ) dct2, ( v16u8 ) dct2_mask );
    dct3 = ( v8i16 ) __msa_bmnz_v( ( v16u8 ) dct_h3,
                                   ( v16u8 ) dct3, ( v16u8 ) dct3_mask );

    non_zero += HADD_SW_S32( ( v4u32 ) ( dct_h0 + dct_h1 + dct_h2 + dct_h3 ) );
    ST_SH4( dct0, dct1, dct2, dct3, p_dct + 32, 8 );

    return !!non_zero;
}
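
One subtlety: both halves accumulate the non-zero test by adding the magnitude vectors in 16-bit lanes before the horizontal sum, which could in principle wrap to zero for pathological inputs; quantised levels are small enough in practice that x264 accepts this. A wrap-proof scalar alternative (illustrative, not the upstream code) ORs the levels instead of adding them:

#include <stdint.h>

static int32_t any_nonzero( const int16_t *p_level, int i_count )
{
    int32_t acc = 0;
    for( int i = 0; i < i_count; i++ )
        acc |= p_level[i];
    return !!acc;
}
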