static void intra_predict_dc_32x32_msa(const uint8_t *src_top,
                                       const uint8_t *src_left, uint8_t *dst,
                                       int32_t dst_stride) {
  uint32_t row;
  v16u8 top0, top1, left0, left1, out;
  v8u16 sum_h, sum_top0, sum_top1, sum_left0, sum_left1;
  v4u32 sum_w;
  v2u64 sum_d;

  /* Sum the 32 top and 32 left neighbor pixels. */
  LD_UB2(src_top, 16, top0, top1);
  LD_UB2(src_left, 16, left0, left1);
  HADD_UB2_UH(top0, top1, sum_top0, sum_top1);
  HADD_UB2_UH(left0, left1, sum_left0, sum_left1);
  sum_h = sum_top0 + sum_top1;
  sum_h += sum_left0 + sum_left1;

  /* Reduce the partial sums to a single total and round-average by 64. */
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 6);
  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);

  /* Fill the 32x32 block with the DC value, two rows per iteration. */
  for (row = 16; row--;) {
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
  }
}
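/* For reference, a minimal scalar sketch (not part of the original file) of
 * what the DC predictors in this file compute when both edges are available:
 * the rounded average of the neighbor pixels, replicated across the block.
 * The function name and the generic size parameter are illustrative, not an
 * existing API.
 */
static void intra_predict_dc_ref_c(const uint8_t *src_top,
                                   const uint8_t *src_left, uint8_t *dst,
                                   int32_t dst_stride, int32_t size) {
  int32_t r, c, sum = 0;

  /* size top pixels + size left pixels, rounded average. */
  for (c = 0; c < size; ++c) sum += src_top[c] + src_left[c];
  sum = (sum + size) / (2 * size); /* e.g. (sum + 32) >> 6 for 32x32 */

  for (r = 0; r < size; ++r) {
    for (c = 0; c < size; ++c) dst[c] = (uint8_t)sum;
    dst += dst_stride;
  }
}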
static void intra_predict_tm_16x16_msa(const uint8_t *src_top_ptr,
                                       const uint8_t *src_left, uint8_t *dst,
                                       int32_t dst_stride) {
  uint8_t top_left = src_top_ptr[-1];
  uint32_t loop_cnt;
  v16i8 src_top, src_left0, src_left1, src_left2, src_left3;
  v8u16 src_top_left, res_r, res_l;

  src_top = LD_SB(src_top_ptr);
  src_top_left = (v8u16)__msa_fill_h(top_left);

  /* Process four rows per iteration: each output pixel is
     left[row] + top[col] - top_left, saturated to 8 bits. */
  for (loop_cnt = 4; loop_cnt--;) {
    src_left0 = __msa_fill_b(src_left[0]);
    src_left1 = __msa_fill_b(src_left[1]);
    src_left2 = __msa_fill_b(src_left[2]);
    src_left3 = __msa_fill_b(src_left[3]);
    src_left += 4;

    ILVRL_B2_UH(src_left0, src_top, res_r, res_l);
    HADD_UB2_UH(res_r, res_l, res_r, res_l);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r, res_l);
    SAT_UH2_UH(res_r, res_l, 7);
    PCKEV_ST_SB(res_r, res_l, dst);
    dst += dst_stride;

    ILVRL_B2_UH(src_left1, src_top, res_r, res_l);
    HADD_UB2_UH(res_r, res_l, res_r, res_l);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r, res_l);
    SAT_UH2_UH(res_r, res_l, 7);
    PCKEV_ST_SB(res_r, res_l, dst);
    dst += dst_stride;

    ILVRL_B2_UH(src_left2, src_top, res_r, res_l);
    HADD_UB2_UH(res_r, res_l, res_r, res_l);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r, res_l);
    SAT_UH2_UH(res_r, res_l, 7);
    PCKEV_ST_SB(res_r, res_l, dst);
    dst += dst_stride;

    ILVRL_B2_UH(src_left3, src_top, res_r, res_l);
    HADD_UB2_UH(res_r, res_l, res_r, res_l);
    IPRED_SUBS_UH2_UH(src_top_left, src_top_left, res_r, res_l);
    SAT_UH2_UH(res_r, res_l, 7);
    PCKEV_ST_SB(res_r, res_l, dst);
    dst += dst_stride;
  }
}
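/* A minimal scalar sketch (not part of the original file) of the TM ("true
 * motion") rule the 16x16 routine above vectorizes: each predicted pixel is
 * left[row] + top[col] - top_left, clamped to [0, 255]. The name and the
 * size parameter are illustrative only.
 */
static void intra_predict_tm_ref_c(const uint8_t *src_top_ptr,
                                   const uint8_t *src_left, uint8_t *dst,
                                   int32_t dst_stride, int32_t size) {
  int32_t r, c;
  const int32_t top_left = src_top_ptr[-1];

  for (r = 0; r < size; ++r) {
    for (c = 0; c < size; ++c) {
      const int32_t val = src_left[r] + src_top_ptr[c] - top_left;
      dst[c] = (uint8_t)(val < 0 ? 0 : (val > 255 ? 255 : val));
    }
    dst += dst_stride;
  }
}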
static void intra_predict_dc_tl_32x32_msa(const uint8_t *src, uint8_t *dst,
                                          int32_t dst_stride) {
  uint32_t row;
  v16u8 data0, data1, out;
  v8u16 sum_h, sum_data0, sum_data1;
  v4u32 sum_w;
  v2u64 sum_d;

  /* Only one edge (top or left) is available: average its 32 pixels. */
  LD_UB2(src, 16, data0, data1);
  HADD_UB2_UH(data0, data1, sum_data0, sum_data1);
  sum_h = sum_data0 + sum_data1;
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 5);
  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);

  /* Fill the 32x32 block with the DC value, two rows per iteration. */
  for (row = 16; row--;) {
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
  }
}
static int32_t avc_coeff_last64_msa( int16_t *p_src )
{
    uint32_t u_res;
    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 tmp_h0, tmp_h1, tmp_h2, tmp_h3, tmp_h4, tmp_h5, tmp_h6, tmp_h7;
    v16u8 tmp0, tmp1, tmp2, tmp3;
    v8u16 vec0, vec1, vec2, vec3;
    v4i32 out0;
    v16u8 mask = { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };

    LD_SH8( p_src, 8, src0, src1, src2, src3, src4, src5, src6, src7 );

    /* Flag zero coefficients: each halfword becomes all ones if the
     * corresponding coefficient is zero, all zeros otherwise. */
    tmp_h0 = __msa_ceqi_h( src0, 0 );
    tmp_h1 = __msa_ceqi_h( src1, 0 );
    tmp_h2 = __msa_ceqi_h( src2, 0 );
    tmp_h3 = __msa_ceqi_h( src3, 0 );
    tmp_h4 = __msa_ceqi_h( src4, 0 );
    tmp_h5 = __msa_ceqi_h( src5, 0 );
    tmp_h6 = __msa_ceqi_h( src6, 0 );
    tmp_h7 = __msa_ceqi_h( src7, 0 );

    /* Collapse the 64 zero flags into a 64-bit bitmap, one bit per
     * coefficient, set when that coefficient is zero. */
    PCKEV_B4_UB( tmp_h1, tmp_h0, tmp_h3, tmp_h2, tmp_h5, tmp_h4, tmp_h7,
                 tmp_h6, tmp0, tmp1, tmp2, tmp3 );
    tmp0 = tmp0 & mask;
    tmp1 = tmp1 & mask;
    tmp2 = tmp2 & mask;
    tmp3 = tmp3 & mask;
    HADD_UB4_UH( tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3 );
    PCKEV_B2_UB( vec1, vec0, vec3, vec2, tmp0, tmp1 );
    HADD_UB2_UH( tmp0, tmp1, vec0, vec1 );
    tmp0 = ( v16u8 ) __msa_pckev_b( ( v16i8 ) vec1, ( v16i8 ) vec0 );
    vec0 = __msa_hadd_u_h( tmp0, tmp0 );
    tmp0 = ( v16u8 ) __msa_pckev_b( ( v16i8 ) vec0, ( v16i8 ) vec0 );

    /* Count the leading ones of the bitmap (the run of zero coefficients at
     * the high end) and convert to the index of the last nonzero one. */
    out0 = ( v4i32 ) __msa_nloc_d( ( v2i64 ) tmp0 );
    u_res = __msa_copy_u_w( out0, 0 );

    return ( 63 - u_res );
}
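/* A minimal scalar sketch (not part of the original file) of the behaviour
 * vectorized above: return the index of the last nonzero coefficient in a
 * 64-entry block, or -1 when every coefficient is zero (the MSA routine
 * also yields -1 in that case, since an all-ones map gives a leading-ones
 * count of 64). The function name is illustrative only.
 */
static int32_t avc_coeff_last64_ref_c( int16_t *p_src )
{
    int32_t i_last = 63;

    while( i_last >= 0 && !p_src[i_last] )
        i_last--;

    return i_last;
}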
static void intra_predict_dc_16x16_msa(const uint8_t *src_top,
                                       const uint8_t *src_left, uint8_t *dst,
                                       int32_t dst_stride) {
  v16u8 top, left, out;
  v8u16 sum_h, sum_top, sum_left;
  v4u32 sum_w;
  v2u64 sum_d;

  /* Average the 16 top and 16 left neighbor pixels (rounded, divide by 32). */
  top = LD_UB(src_top);
  left = LD_UB(src_left);
  HADD_UB2_UH(top, left, sum_top, sum_left);
  sum_h = sum_top + sum_left;
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 5);
  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);

  /* Fill all 16 rows with the DC value. */
  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
  dst += (8 * dst_stride);
  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
}