/* DC prediction for a 32x32 block when only one edge (top or left) is
 * available: fill the block with the rounded mean of the 32 edge samples
 * ((sum + 16) >> 5). */
static void intra_predict_dc_tl_32x32_msa(const uint8_t *src, uint8_t *dst,
                                          int32_t dst_stride) {
  uint32_t row;
  v16u8 data0, data1, out;
  v8u16 sum_h, sum_data0, sum_data1;
  v4u32 sum_w;
  v2u64 sum_d;

  LD_UB2(src, 16, data0, data1);
  HADD_UB2_UH(data0, data1, sum_data0, sum_data1);
  sum_h = sum_data0 + sum_data1;
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 5);
  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);

  for (row = 16; row--;) {
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
  }
}
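/* Illustrative scalar sketch of what the intra_predict_dc_tl_*_msa kernels in
 * this file compute: the block is filled with the rounded mean of the 'size'
 * available edge samples, i.e. (sum + size / 2) >> log2(size), which is what
 * __msa_srari_w performs on the vector sum. This helper and its parameters
 * are hypothetical and only serve as a reference; they are not part of the
 * MSA code path. */
static void intra_predict_dc_tl_ref(const uint8_t *edge, int32_t size,
                                    uint8_t *dst, int32_t dst_stride) {
  int32_t i, j;
  uint32_t sum = 0;

  for (i = 0; i < size; ++i) sum += edge[i];

  /* 'size' is a power of two, so this division equals the rounding shift. */
  sum = (sum + (uint32_t)size / 2) / (uint32_t)size;

  for (i = 0; i < size; ++i) {
    for (j = 0; j < size; ++j) dst[j] = (uint8_t)sum;
    dst += dst_stride;
  }
}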
/* 8x8 DC prediction with neighbor-availability flags: average whichever of
 * the top row and left column is present, or fall back to a flat 128 block. */
static void intra_predict_dc_8x8_msa(uint8_t *src_top, uint8_t *src_left,
                                     int32_t src_stride_left, uint8_t *dst,
                                     int32_t dst_stride, uint8_t is_above,
                                     uint8_t is_left) {
  uint32_t row, addition = 0;
  uint64_t out;
  v16u8 src_above, store;
  v8u16 sum_above;
  v4u32 sum_top;
  v2u64 sum;

  if (is_left && is_above) {
    src_above = LD_UB(src_top);
    sum_above = __msa_hadd_u_h(src_above, src_above);
    sum_top = __msa_hadd_u_w(sum_above, sum_above);
    sum = __msa_hadd_u_d(sum_top, sum_top);
    addition = __msa_copy_u_w((v4i32)sum, 0);

    for (row = 0; row < 8; ++row) {
      addition += src_left[row * src_stride_left];
    }

    addition = (addition + 8) >> 4;
    store = (v16u8)__msa_fill_b(addition);
  } else if (is_left) {
    /* The left-only, top-only, and no-neighbor branches below, and the final
     * store loop, are a reconstruction of the truncated tail of this function
     * using the standard DC-prediction fallbacks. */
    for (row = 0; row < 8; ++row) {
      addition += src_left[row * src_stride_left];
    }

    addition = (addition + 4) >> 3;
    store = (v16u8)__msa_fill_b(addition);
  } else if (is_above) {
    src_above = LD_UB(src_top);
    sum_above = __msa_hadd_u_h(src_above, src_above);
    sum_top = __msa_hadd_u_w(sum_above, sum_above);
    sum = __msa_hadd_u_d(sum_top, sum_top);
    sum = (v2u64)__msa_srari_d((v2i64)sum, 3);
    store = (v16u8)__msa_splati_b((v16i8)sum, 0);
  } else {
    store = (v16u8)__msa_fill_b(128);
  }

  out = __msa_copy_u_d((v2i64)store, 0);

  for (row = 8; row--;) {
    SD(out, dst);
    dst += dst_stride;
  }
}
/* 8x8 DC prediction from both edges: fill the block with the rounded mean of
 * the 8 top and 8 left samples ((sum + 8) >> 4). */
static void intra_predict_dc_8x8_msa(const uint8_t *src_top,
                                     const uint8_t *src_left, uint8_t *dst,
                                     int32_t dst_stride) {
  uint64_t val0, val1;
  v16i8 store;
  v16u8 src = { 0 };
  v8u16 sum_h;
  v4u32 sum_w;
  v2u64 sum_d;

  val0 = LD(src_top);
  val1 = LD(src_left);
  INSERT_D2_UB(val0, val1, src);
  sum_h = __msa_hadd_u_h(src, src);
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 4);
  store = __msa_splati_b((v16i8)sum_w, 0);
  val0 = __msa_copy_u_d((v2i64)store, 0);

  SD4(val0, val0, val0, val0, dst, dst_stride);
  dst += (4 * dst_stride);
  SD4(val0, val0, val0, val0, dst, dst_stride);
}
/* 32x32 DC prediction from both edges: fill the block with the rounded mean
 * of the 32 top and 32 left samples ((sum + 32) >> 6). */
static void intra_predict_dc_32x32_msa(const uint8_t *src_top,
                                       const uint8_t *src_left, uint8_t *dst,
                                       int32_t dst_stride) {
  uint32_t row;
  v16u8 top0, top1, left0, left1, out;
  v8u16 sum_h, sum_top0, sum_top1, sum_left0, sum_left1;
  v4u32 sum_w;
  v2u64 sum_d;

  LD_UB2(src_top, 16, top0, top1);
  LD_UB2(src_left, 16, left0, left1);
  HADD_UB2_UH(top0, top1, sum_top0, sum_top1);
  HADD_UB2_UH(left0, left1, sum_left0, sum_left1);
  sum_h = sum_top0 + sum_top1;
  sum_h += sum_left0 + sum_left1;
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 6);
  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);

  for (row = 16; row--;) {
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
  }
}
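/* Illustrative scalar sketch of the two-edge DC predictors in this file
 * (intra_predict_dc_4x4/8x8/16x16/32x32_msa): the rounded mean of the 'size'
 * top and 'size' left neighbors fills the whole block. The helper name and
 * 'size' parameter are hypothetical, for reference only. */
static void intra_predict_dc_ref(const uint8_t *src_top,
                                 const uint8_t *src_left, int32_t size,
                                 uint8_t *dst, int32_t dst_stride) {
  int32_t i, j;
  uint32_t sum = 0;

  for (i = 0; i < size; ++i) sum += src_top[i] + src_left[i];

  /* Rounded mean of 2*size samples, matching the srari shifts above. */
  sum = (sum + (uint32_t)size) / (2 * (uint32_t)size);

  for (i = 0; i < size; ++i) {
    for (j = 0; j < size; ++j) dst[j] = (uint8_t)sum;
    dst += dst_stride;
  }
}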
/* Rounded average of a 4x4 block: (sum of 16 pixels + 8) >> 4. */
uint32_t vp10_avg_4x4_msa(const uint8_t *src, int32_t src_stride) {
  uint32_t sum_out;
  uint32_t src0, src1, src2, src3;
  v16u8 vec = { 0 };
  v8u16 sum0;
  v4u32 sum1;
  v2u64 sum2;

  LW4(src, src_stride, src0, src1, src2, src3);
  INSERT_W4_UB(src0, src1, src2, src3, vec);
  sum0 = __msa_hadd_u_h(vec, vec);
  sum1 = __msa_hadd_u_w(sum0, sum0);
  sum0 = (v8u16)__msa_pckev_h((v8i16)sum1, (v8i16)sum1);
  sum1 = __msa_hadd_u_w(sum0, sum0);
  sum2 = __msa_hadd_u_d(sum1, sum1);
  sum1 = (v4u32)__msa_srari_w((v4i32)sum2, 4);
  sum_out = __msa_copy_u_w((v4i32)sum1, 0);

  return sum_out;
}
/* Rounded average of an 8x8 block: (sum of 64 pixels + 32) >> 6. */
uint32_t vp10_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
  uint32_t sum_out;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
  v4u32 sum = { 0 };

  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
  HADD_UB4_UH(src0, src1, src2, src3, sum0, sum1, sum2, sum3);
  HADD_UB4_UH(src4, src5, src6, src7, sum4, sum5, sum6, sum7);
  ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6);
  ADD2(sum0, sum2, sum4, sum6, sum0, sum4);
  sum0 += sum4;
  sum = __msa_hadd_u_w(sum0, sum0);
  sum0 = (v8u16)__msa_pckev_h((v8i16)sum, (v8i16)sum);
  sum = __msa_hadd_u_w(sum0, sum0);
  sum = (v4u32)__msa_srari_w((v4i32)sum, 6);
  sum_out = __msa_copy_u_w((v4i32)sum, 0);

  return sum_out;
}
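/* Illustrative scalar sketch of vp10_avg_4x4_msa and vp10_avg_8x8_msa: the
 * rounded average of an NxN block, i.e. (sum + N*N/2) >> log2(N*N). The
 * helper name and 'size' parameter are hypothetical, for reference only. */
static uint32_t avg_nxn_ref(const uint8_t *src, int32_t src_stride,
                            int32_t size) {
  int32_t i, j;
  uint32_t n = (uint32_t)(size * size);
  uint32_t sum = 0;

  for (i = 0; i < size; ++i) {
    for (j = 0; j < size; ++j) sum += src[i * src_stride + j];
  }

  /* 'n' is a power of two, so this division equals the rounding shift. */
  return (sum + n / 2) / n;
}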
/* 4x4 DC prediction from a single edge: fill with the rounded mean of the
 * 4 edge samples ((sum + 2) >> 2). */
static void intra_predict_dc_tl_4x4_msa(const uint8_t *src, uint8_t *dst,
                                        int32_t dst_stride) {
  uint32_t val0;
  v16i8 store, data = { 0 };
  v8u16 sum_h;
  v4u32 sum_w;

  val0 = LW(src);
  data = (v16i8)__msa_insert_w((v4i32)data, 0, val0);
  sum_h = __msa_hadd_u_h((v16u8)data, (v16u8)data);
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_w, 2);
  store = __msa_splati_b((v16i8)sum_w, 0);
  val0 = __msa_copy_u_w((v4i32)store, 0);

  SW4(val0, val0, val0, val0, dst, dst_stride);
}
/* 16x16 DC prediction from a single edge: fill with the rounded mean of the
 * 16 edge samples ((sum + 8) >> 4). */
static void intra_predict_dc_tl_16x16_msa(const uint8_t *src, uint8_t *dst,
                                          int32_t dst_stride) {
  v16u8 data, out;
  v8u16 sum_h;
  v4u32 sum_w;
  v2u64 sum_d;

  data = LD_UB(src);
  sum_h = __msa_hadd_u_h(data, data);
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 4);
  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);

  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
  dst += (8 * dst_stride);
  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
}
/* 4x4 DC prediction from both edges: fill with the rounded mean of the
 * 4 top and 4 left samples ((sum + 4) >> 3). */
static void intra_predict_dc_4x4_msa(const uint8_t *src_top,
                                     const uint8_t *src_left, uint8_t *dst,
                                     int32_t dst_stride) {
  uint32_t val0, val1;
  v16i8 store, src = { 0 };
  v8u16 sum_h;
  v4u32 sum_w;
  v2u64 sum_d;

  val0 = LW(src_top);
  val1 = LW(src_left);
  INSERT_W2_SB(val0, val1, src);
  sum_h = __msa_hadd_u_h((v16u8)src, (v16u8)src);
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 3);
  store = __msa_splati_b((v16i8)sum_w, 0);
  val0 = __msa_copy_u_w((v4i32)store, 0);

  SW4(val0, val0, val0, val0, dst, dst_stride);
}
/* 8x8 DC prediction from a single edge: fill with the rounded mean of the
 * 8 edge samples ((sum + 4) >> 3). */
static void intra_predict_dc_tl_8x8_msa(const uint8_t *src, uint8_t *dst,
                                        int32_t dst_stride) {
  uint64_t val0;
  v16i8 store;
  v16u8 data = { 0 };
  v8u16 sum_h;
  v4u32 sum_w;
  v2u64 sum_d;

  val0 = LD(src);
  data = (v16u8)__msa_insert_d((v2i64)data, 0, val0);
  sum_h = __msa_hadd_u_h(data, data);
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 3);
  store = __msa_splati_b((v16i8)sum_w, 0);
  val0 = __msa_copy_u_d((v2i64)store, 0);

  SD4(val0, val0, val0, val0, dst, dst_stride);
  dst += (4 * dst_stride);
  SD4(val0, val0, val0, val0, dst, dst_stride);
}
/* 16x16 DC prediction from both edges: fill with the rounded mean of the
 * 16 top and 16 left samples ((sum + 16) >> 5). */
static void intra_predict_dc_16x16_msa(const uint8_t *src_top,
                                       const uint8_t *src_left, uint8_t *dst,
                                       int32_t dst_stride) {
  v16u8 top, left, out;
  v8u16 sum_h, sum_top, sum_left;
  v4u32 sum_w;
  v2u64 sum_d;

  top = LD_UB(src_top);
  left = LD_UB(src_left);
  HADD_UB2_UH(top, left, sum_top, sum_left);
  sum_h = sum_top + sum_left;
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 5);
  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);

  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
  dst += (8 * dst_stride);
  ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);
}