Example #1
/* True Motion (TM) intra prediction for a 32x32 block (MIPS MSA):
 * dst[y][x] = clip8(top[x] + left[y] - top_left), clamped to [0, 255]. */
static void intra_predict_tm_32x32_msa(const uint8_t *src_top,
                                       const uint8_t *src_left,
                                       uint8_t *dst, int32_t dst_stride) {
  uint8_t top_left = src_top[-1];
  uint32_t loop_cnt;
  v16i8 src_top0, src_top1, src_left0, src_left1, src_left2, src_left3;
  v8u16 src_top_left, res_r0, res_r1, res_l0, res_l1;

  LD_SB2(src_top, 16, src_top0, src_top1);
  src_top_left = (v8u16)__msa_fill_h(top_left);

  /* Each iteration replicates four left pixels and writes four 32-byte rows. */
  for (loop_cnt = 8; loop_cnt--;) {
    src_left0 = __msa_fill_b(src_left[0]);
    src_left1 = __msa_fill_b(src_left[1]);
    src_left2 = __msa_fill_b(src_left[2]);
    src_left3 = __msa_fill_b(src_left[3]);
    src_left += 4;

    ILVR_B2_UH(src_left0, src_top0, src_left0, src_top1, res_r0, res_r1);
    ILVL_B2_UH(src_left0, src_top0, src_left0, src_top1, res_l0, res_l1);
    HADD_UB4_UH(res_r0, res_l0, res_r1, res_l1, res_r0, res_l0, res_r1, res_l1);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r0, res_l0);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r1, res_l1);
    SAT_UH4_UH(res_r0, res_l0, res_r1, res_l1, 7);
    PCKEV_ST_SB(res_r0, res_l0, dst);
    PCKEV_ST_SB(res_r1, res_l1, dst + 16);
    dst += dst_stride;

    ILVR_B2_UH(src_left1, src_top0, src_left1, src_top1, res_r0, res_r1);
    ILVL_B2_UH(src_left1, src_top0, src_left1, src_top1, res_l0, res_l1);
    HADD_UB4_UH(res_r0, res_l0, res_r1, res_l1, res_r0, res_l0, res_r1, res_l1);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r0, res_l0);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r1, res_l1);
    SAT_UH4_UH(res_r0, res_l0, res_r1, res_l1, 7);
    PCKEV_ST_SB(res_r0, res_l0, dst);
    PCKEV_ST_SB(res_r1, res_l1, dst + 16);
    dst += dst_stride;

    ILVR_B2_UH(src_left2, src_top0, src_left2, src_top1, res_r0, res_r1);
    ILVL_B2_UH(src_left2, src_top0, src_left2, src_top1, res_l0, res_l1);
    HADD_UB4_UH(res_r0, res_l0, res_r1, res_l1, res_r0, res_l0, res_r1, res_l1);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r0, res_l0);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r1, res_l1);
    SAT_UH4_UH(res_r0, res_l0, res_r1, res_l1, 7);
    PCKEV_ST_SB(res_r0, res_l0, dst);
    PCKEV_ST_SB(res_r1, res_l1, dst + 16);
    dst += dst_stride;

    ILVR_B2_UH(src_left3, src_top0, src_left3, src_top1, res_r0, res_r1);
    ILVL_B2_UH(src_left3, src_top0, src_left3, src_top1, res_l0, res_l1);
    HADD_UB4_UH(res_r0, res_l0, res_r1, res_l1, res_r0, res_l0, res_r1, res_l1);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r0, res_l0);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r1, res_l1);
    SAT_UH4_UH(res_r0, res_l0, res_r1, res_l1, 7);
    PCKEV_ST_SB(res_r0, res_l0, dst);
    PCKEV_ST_SB(res_r1, res_l1, dst + 16);
    dst += dst_stride;
  }
}
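
For reference, here is a minimal scalar sketch of the TM ("true motion") rule that this kernel and the 4x4/8x8 variants below vectorize: every predicted pixel is the clamped sum top[x] + left[y] - top_left. The names tm_predict_c and clip_u8 are illustrative, not part of the source.

#include <stdint.h>

static uint8_t clip_u8(int32_t val) {
  return (val < 0) ? 0 : (val > 255) ? 255 : (uint8_t)val;
}

/* Scalar TM prediction for a bs x bs block; src_top[-1] must be readable. */
static void tm_predict_c(const uint8_t *src_top, const uint8_t *src_left,
                         uint8_t *dst, int32_t dst_stride, int32_t bs) {
  const int32_t top_left = src_top[-1];
  int32_t row, col;

  for (row = 0; row < bs; ++row) {
    for (col = 0; col < bs; ++col) {
      dst[col] = clip_u8(src_top[col] + src_left[row] - top_left);
    }
    dst += dst_stride;
  }
}

The MSA kernels realize the same arithmetic in halfword lanes: the saturating subtract handles the clamp at 0 and the 7-bit unsigned saturation handles the clamp at 255.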
Example #2
/* TM intra prediction for a 4x4 block (MIPS MSA); the four top pixels are
 * loaded as one word and the four rows are produced in a single pass. */
static void intra_predict_tm_4x4_msa(const uint8_t *src_top_ptr,
                                     const uint8_t *src_left,
                                     uint8_t *dst, int32_t dst_stride) {
  uint32_t val;
  uint8_t top_left = src_top_ptr[-1];
  v16i8 src_left0, src_left1, src_left2, src_left3, tmp0, tmp1, src_top = { 0 };
  v16u8 src0, src1, src2, src3;
  v8u16 src_top_left, vec0, vec1, vec2, vec3;

  src_top_left = (v8u16)__msa_fill_h(top_left);
  val = LW(src_top_ptr);
  src_top = (v16i8)__msa_insert_w((v4i32)src_top, 0, val);

  src_left0 = __msa_fill_b(src_left[0]);
  src_left1 = __msa_fill_b(src_left[1]);
  src_left2 = __msa_fill_b(src_left[2]);
  src_left3 = __msa_fill_b(src_left[3]);

  ILVR_B4_UB(src_left0, src_top, src_left1, src_top, src_left2, src_top,
             src_left3, src_top, src0, src1, src2, src3);
  HADD_UB4_UH(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
  IPRED_SUBS_UH2_UH(src_top_left, src_top_left, vec0, vec1);
  IPRED_SUBS_UH2_UH(src_top_left, src_top_left, vec2, vec3);
  SAT_UH4_UH(vec0, vec1, vec2, vec3, 7);
  PCKEV_B2_SB(vec1, vec0, vec3, vec2, tmp0, tmp1);
  ST4x4_UB(tmp0, tmp1, 0, 2, 0, 2, dst, dst_stride);
}
Example #3
/* TM intra prediction for an 8x8 block (MIPS MSA), four rows per loop pass. */
static void intra_predict_tm_8x8_msa(const uint8_t *src_top_ptr,
                                     const uint8_t *src_left,
                                     uint8_t *dst, int32_t dst_stride) {
  uint64_t val;
  uint8_t top_left = src_top_ptr[-1];
  uint32_t loop_cnt;
  v16i8 src_left0, src_left1, src_left2, src_left3, tmp0, tmp1, src_top = { 0 };
  v8u16 src_top_left, vec0, vec1, vec2, vec3;
  v16u8 src0, src1, src2, src3;

  val = LD(src_top_ptr);
  src_top = (v16i8)__msa_insert_d((v2i64)src_top, 0, val);
  src_top_left = (v8u16)__msa_fill_h(top_left);

  for (loop_cnt = 2; loop_cnt--;) {
    src_left0 = __msa_fill_b(src_left[0]);
    src_left1 = __msa_fill_b(src_left[1]);
    src_left2 = __msa_fill_b(src_left[2]);
    src_left3 = __msa_fill_b(src_left[3]);
    src_left += 4;

    ILVR_B4_UB(src_left0, src_top, src_left1, src_top, src_left2, src_top,
               src_left3, src_top, src0, src1, src2, src3);
    HADD_UB4_UH(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, vec0, vec1);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, vec2, vec3);
    SAT_UH4_UH(vec0, vec1, vec2, vec3, 7);
    PCKEV_B2_SB(vec1, vec0, vec3, vec2, tmp0, tmp1);
    ST8x4_UB(tmp0, tmp1, dst, dst_stride);
    dst += (4 * dst_stride);
  }
}
Example #4
/* Rounded average of an 8x8 block: (sum of the 64 pixels + 32) >> 6. */
uint32_t vp10_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
  uint32_t sum_out;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
  v4u32 sum = { 0 };

  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
  HADD_UB4_UH(src0, src1, src2, src3, sum0, sum1, sum2, sum3);
  HADD_UB4_UH(src4, src5, src6, src7, sum4, sum5, sum6, sum7);
  ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6);
  ADD2(sum0, sum2, sum4, sum6, sum0, sum4);
  sum0 += sum4;

  /* Horizontal reduction: fold the halfword sums to one word, then round. */
  sum = __msa_hadd_u_w(sum0, sum0);
  sum0 = (v8u16)__msa_pckev_h((v8i16)sum, (v8i16)sum);
  sum = __msa_hadd_u_w(sum0, sum0);
  sum = (v4u32)__msa_srari_w((v4i32)sum, 6);
  sum_out = __msa_copy_u_w((v4i32)sum, 0);

  return sum_out;
}
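
As a scalar cross-check: the vector code above sums all 64 pixels, and __msa_srari_w(sum, 6) performs the rounded shift (sum + 32) >> 6. This sketch uses the illustrative name avg_8x8_c, which is not part of the source.

#include <stdint.h>

static uint32_t avg_8x8_c(const uint8_t *src, int32_t src_stride) {
  uint32_t sum = 0;
  int32_t row, col;

  for (row = 0; row < 8; ++row) {
    for (col = 0; col < 8; ++col) {
      sum += src[col];
    }
    src += src_stride;
  }
  /* Rounded average of the 64 pixels, matching __msa_srari_w(sum, 6). */
  return (sum + 32) >> 6;
}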
Example #5
/* Index of the last nonzero coefficient in a 64-coefficient block,
 * or -1 if every coefficient is zero. */
static int32_t avc_coeff_last64_msa( int16_t *p_src )
{
    uint32_t u_res;
    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 tmp_h0, tmp_h1, tmp_h2, tmp_h3, tmp_h4, tmp_h5, tmp_h6, tmp_h7;
    v16u8 tmp0, tmp1, tmp2, tmp3;
    v8u16 vec0, vec1, vec2, vec3;
    v4i32 out0;
    /* Per-byte bit weights used to pack the is-zero flags into a bitmap. */
    v16u8 mask = { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };

    LD_SH8( p_src, 8, src0, src1, src2, src3, src4, src5, src6, src7 );

    tmp_h0 = __msa_ceqi_h( src0, 0 );
    tmp_h1 = __msa_ceqi_h( src1, 0 );
    tmp_h2 = __msa_ceqi_h( src2, 0 );
    tmp_h3 = __msa_ceqi_h( src3, 0 );
    tmp_h4 = __msa_ceqi_h( src4, 0 );
    tmp_h5 = __msa_ceqi_h( src5, 0 );
    tmp_h6 = __msa_ceqi_h( src6, 0 );
    tmp_h7 = __msa_ceqi_h( src7, 0 );

    PCKEV_B4_UB( tmp_h1, tmp_h0, tmp_h3, tmp_h2, tmp_h5, tmp_h4, tmp_h7, tmp_h6,
                 tmp0, tmp1, tmp2, tmp3 );

    tmp0 = tmp0 & mask;
    tmp1 = tmp1 & mask;
    tmp2 = tmp2 & mask;
    tmp3 = tmp3 & mask;

    HADD_UB4_UH( tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3 );
    PCKEV_B2_UB( vec1, vec0, vec3, vec2, tmp0, tmp1 );
    HADD_UB2_UH( tmp0, tmp1, vec0, vec1 );

    tmp0 = ( v16u8 ) __msa_pckev_b( ( v16i8 ) vec1, ( v16i8 ) vec0 );
    vec0 = __msa_hadd_u_h( tmp0, tmp0 );
    tmp0 = ( v16u8 ) __msa_pckev_b( ( v16i8 ) vec0, ( v16i8 ) vec0 );
    /* Leading-ones count == number of zero coefficients at the high end. */
    out0 = ( v4i32 ) __msa_nloc_d( ( v2i64 ) tmp0 );
    u_res = __msa_copy_u_w( out0, 0 );

    return ( 63 - u_res );
}
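
For clarity, a scalar sketch of the search being vectorized: the MSA code builds a 64-bit is-zero bitmap, counts the leading ones (zero coefficients at the high-index end), and returns 63 minus that count, which is -1 when every coefficient is zero. The name coeff_last64_c is illustrative, not part of the source.

#include <stdint.h>

static int32_t coeff_last64_c( const int16_t *p_src )
{
    int32_t i;

    for( i = 63; i >= 0; i-- )
        if( p_src[i] )
            return i;

    /* All 64 coefficients are zero: 64 leading ones, so 63 - 64 == -1. */
    return -1;
}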