static void intra_predict_dc_tl_32x32_msa(const uint8_t *src, uint8_t *dst,
                                          int32_t dst_stride) {
  uint32_t row;
  v16u8 data0, data1, out;
  v8u16 sum_h, sum_data0, sum_data1;
  v4u32 sum_w;
  v2u64 sum_d;

  /* Sum the 32 neighboring pixels (top row or left column only). */
  LD_UB2(src, 16, data0, data1);
  HADD_UB2_UH(data0, data1, sum_data0, sum_data1);
  sum_h = sum_data0 + sum_data1;
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  /* Round and divide by 32, then broadcast the DC value to all lanes. */
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 5);
  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);

  for (row = 16; row--;) {
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
  }
}
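/* For reference, a minimal scalar sketch (added here for illustration; not
 * part of the original source) of what intra_predict_dc_tl_32x32_msa
 * computes: sum the 32 neighboring pixels, divide by 32 with rounding (the
 * __msa_srari_w(..., 5) above), and broadcast the result over the 32x32
 * block. */
static void intra_predict_dc_tl_32x32_ref(const uint8_t *src, uint8_t *dst,
                                          int32_t dst_stride) {
  uint32_t row, col, sum = 0;
  uint8_t dc;

  for (col = 0; col < 32; ++col) sum += src[col];
  dc = (uint8_t)((sum + 16) >> 5); /* round-to-nearest divide by 32 */

  for (row = 0; row < 32; ++row) {
    for (col = 0; col < 32; ++col) dst[col] = dc;
    dst += dst_stride;
  }
}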
static void intra_predict_horiz_32x32_msa(const uint8_t *src, uint8_t *dst,
                                          int32_t dst_stride) {
  uint32_t row;
  uint8_t inp0, inp1, inp2, inp3;
  v16u8 src0, src1, src2, src3;

  /* Replicate each left-column pixel across its 32-pixel row, four rows per
     iteration. */
  for (row = 8; row--;) {
    inp0 = src[0];
    inp1 = src[1];
    inp2 = src[2];
    inp3 = src[3];
    src += 4;

    src0 = (v16u8)__msa_fill_b(inp0);
    src1 = (v16u8)__msa_fill_b(inp1);
    src2 = (v16u8)__msa_fill_b(inp2);
    src3 = (v16u8)__msa_fill_b(inp3);

    ST_UB2(src0, src0, dst, 16);
    dst += dst_stride;
    ST_UB2(src1, src1, dst, 16);
    dst += dst_stride;
    ST_UB2(src2, src2, dst, 16);
    dst += dst_stride;
    ST_UB2(src3, src3, dst, 16);
    dst += dst_stride;
  }
}
static void intra_predict_dc_32x32_msa(const uint8_t *src_top,
                                       const uint8_t *src_left, uint8_t *dst,
                                       int32_t dst_stride) {
  uint32_t row;
  v16u8 top0, top1, left0, left1, out;
  v8u16 sum_h, sum_top0, sum_top1, sum_left0, sum_left1;
  v4u32 sum_w;
  v2u64 sum_d;

  /* Sum the 32 top and 32 left neighboring pixels. */
  LD_UB2(src_top, 16, top0, top1);
  LD_UB2(src_left, 16, left0, left1);
  HADD_UB2_UH(top0, top1, sum_top0, sum_top1);
  HADD_UB2_UH(left0, left1, sum_left0, sum_left1);
  sum_h = sum_top0 + sum_top1;
  sum_h += sum_left0 + sum_left1;
  sum_w = __msa_hadd_u_w(sum_h, sum_h);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  sum_w = (v4u32)__msa_pckev_w((v4i32)sum_d, (v4i32)sum_d);
  sum_d = __msa_hadd_u_d(sum_w, sum_w);
  /* Round and divide by 64, then broadcast the DC value to all lanes. */
  sum_w = (v4u32)__msa_srari_w((v4i32)sum_d, 6);
  out = (v16u8)__msa_splati_b((v16i8)sum_w, 0);

  for (row = 16; row--;) {
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
  }
}
static void intra_predict_128dc_32x32_msa(uint8_t *dst, int32_t dst_stride) {
  uint32_t row;
  const v16u8 out = (v16u8)__msa_ldi_b(128);

  for (row = 16; row--;) {
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
    ST_UB2(out, out, dst, 16);
    dst += dst_stride;
  }
}
static WEBP_INLINE void PredictLineInverse0(const uint8_t* src,
                                            const uint8_t* pred,
                                            uint8_t* dst, int length) {
  v16u8 src0, pred0, dst0;
  assert(length >= 0);
  /* Subtract the predictor 32 bytes at a time. */
  while (length >= 32) {
    v16u8 src1, pred1, dst1;
    LD_UB2(src, 16, src0, src1);
    LD_UB2(pred, 16, pred0, pred1);
    SUB2(src0, pred0, src1, pred1, dst0, dst1);
    ST_UB2(dst0, dst1, dst, 16);
    src += 32;
    pred += 32;
    dst += 32;
    length -= 32;
  }
  if (length > 0) {  /* handle the remaining 0..31 bytes */
    int i;
    if (length >= 16) {
      src0 = LD_UB(src);
      pred0 = LD_UB(pred);
      dst0 = src0 - pred0;
      ST_UB(dst0, dst);
      src += 16;
      pred += 16;
      dst += 16;
      length -= 16;
    }
    for (i = 0; i < length; i++) dst[i] = src[i] - pred[i];
  }
}
void vp8_loop_filter_simple_horizontal_edge_msa(uint8_t *src, int32_t pitch,
                                                const uint8_t *b_limit_ptr) {
  v16u8 p1, p0, q1, q0;
  v16u8 mask, b_limit;

  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
  LD_UB4(src - (pitch << 1), pitch, p1, p0, q0, q1);
  VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
  VP8_SIMPLE_FILT(p1, p0, q0, q1, mask);
  ST_UB2(p0, q0, (src - pitch), pitch);
}
static void intra_predict_vert_32x32_msa(const uint8_t *src, uint8_t *dst,
                                         int32_t dst_stride) {
  uint32_t row;
  v16u8 src1, src2;

  src1 = LD_UB(src);
  src2 = LD_UB(src + 16);

  /* Copy the 32-pixel top row into each of the 32 rows below. */
  for (row = 32; row--;) {
    ST_UB2(src1, src2, dst, 16);
    dst += dst_stride;
  }
}
void ff_vp8_v_loop_filter_simple_msa(uint8_t *src, ptrdiff_t pitch,
                                     int b_limit_ptr)
{
    v16u8 p1, p0, q1, q0;
    v16u8 mask, b_limit;

    b_limit = (v16u8) __msa_fill_b(b_limit_ptr);
    /* load vector elements */
    LD_UB4(src - (pitch << 1), pitch, p1, p0, q0, q1);
    VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
    VP8_SIMPLE_FILT(p1, p0, q0, q1, mask);
    ST_UB2(p0, q0, (src - pitch), pitch);
}
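/* Note: the libvpx and FFmpeg routines above are the same algorithm. In
 * scalar terms (following the VP8 specification; stated here for reference,
 * not taken from this source), the edge is filtered only when
 *     2 * |p0 - q0| + |p1 - q1| / 2 <= b_limit
 * and, with the pixels biased to signed range, the update is
 *     a  = clamp(clamp(p1 - q1) + 3 * (q0 - p0))
 *     q0 -= clamp(a + 4) >> 3
 *     p0 += clamp(a + 3) >> 3
 * VP8_SIMPLE_MASK and VP8_SIMPLE_FILT vectorize this test and update across
 * the 16 byte lanes. */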
static void avc_sub4x4_dct_msa( uint8_t *p_src, int32_t i_src_stride,
                                uint8_t *p_ref, int32_t i_dst_stride,
                                int16_t *p_dst )
{
    uint32_t i_src0, i_src1, i_src2, i_src3;
    uint32_t i_ref0, i_ref1, i_ref2, i_ref3;
    v16i8 src = { 0 };
    v16i8 ref = { 0 };
    v16u8 inp0, inp1;
    v8i16 diff0, diff1, diff2, diff3;
    v8i16 temp0, temp1, temp2, temp3;

    /* Load the 4x4 source and reference blocks, compute the differences. */
    LW4( p_src, i_src_stride, i_src0, i_src1, i_src2, i_src3 );
    LW4( p_ref, i_dst_stride, i_ref0, i_ref1, i_ref2, i_ref3 );
    INSERT_W4_SB( i_src0, i_src1, i_src2, i_src3, src );
    INSERT_W4_SB( i_ref0, i_ref1, i_ref2, i_ref3, ref );
    ILVRL_B2_UB( src, ref, inp0, inp1 );
    HSUB_UB2_SH( inp0, inp1, diff0, diff2 );
    diff1 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) diff0, ( v2i64 ) diff0 );
    diff3 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) diff2, ( v2i64 ) diff2 );

    /* Horizontal transform pass. */
    BUTTERFLY_4( diff0, diff1, diff2, diff3, temp0, temp1, temp2, temp3 );
    diff0 = temp0 + temp1;
    diff1 = ( temp3 << 1 ) + temp2;
    diff2 = temp0 - temp1;
    diff3 = temp3 - ( temp2 << 1 );

    /* Vertical transform pass on the transposed rows. */
    TRANSPOSE4x4_SH_SH( diff0, diff1, diff2, diff3,
                        temp0, temp1, temp2, temp3 );
    BUTTERFLY_4( temp0, temp1, temp2, temp3, diff0, diff1, diff2, diff3 );
    temp0 = diff0 + diff1;
    temp1 = ( diff3 << 1 ) + diff2;
    temp2 = diff0 - diff1;
    temp3 = diff3 - ( diff2 << 1 );

    ILVR_D2_UB( temp1, temp0, temp3, temp2, inp0, inp1 );
    ST_UB2( inp0, inp1, p_dst, 8 );
}
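/* A plain-C sketch (illustrative; this is the standard H.264 4x4 integer
 * forward transform, not copied from this source) of what avc_sub4x4_dct_msa
 * vectorizes: subtract the reference block from the source block, then apply
 * the butterfly across rows and again across columns. */
static void avc_sub4x4_dct_ref( uint8_t *p_src, int32_t i_src_stride,
                                uint8_t *p_ref, int32_t i_ref_stride,
                                int16_t *p_dst )
{
    int16_t d[16];
    int16_t t0, t1, t2, t3;
    int32_t i;

    for( i = 0; i < 16; i++ )
        d[i] = p_src[(i >> 2) * i_src_stride + (i & 3)] -
               p_ref[(i >> 2) * i_ref_stride + (i & 3)];

    for( i = 0; i < 4; i++ )    /* horizontal pass */
    {
        t0 = d[4 * i + 0] + d[4 * i + 3];
        t1 = d[4 * i + 1] + d[4 * i + 2];
        t2 = d[4 * i + 1] - d[4 * i + 2];
        t3 = d[4 * i + 0] - d[4 * i + 3];
        d[4 * i + 0] = t0 + t1;
        d[4 * i + 1] = ( t3 << 1 ) + t2;
        d[4 * i + 2] = t0 - t1;
        d[4 * i + 3] = t3 - ( t2 << 1 );
    }
    for( i = 0; i < 4; i++ )    /* vertical pass */
    {
        t0 = d[i + 0] + d[i + 12];
        t1 = d[i + 4] + d[i + 8];
        t2 = d[i + 4] - d[i + 8];
        t3 = d[i + 0] - d[i + 12];
        p_dst[i + 0]  = t0 + t1;
        p_dst[i + 4]  = ( t3 << 1 ) + t2;
        p_dst[i + 8]  = t0 - t1;
        p_dst[i + 12] = t3 - ( t2 << 1 );
    }
}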
static void mbloop_filter_horizontal_edge_y_msa(uint8_t *src, int32_t pitch,
                                                const uint8_t b_limit_in,
                                                const uint8_t limit_in,
                                                const uint8_t thresh_in) {
  uint8_t *temp_src;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 mask, hev, flat, thresh, limit, b_limit;

  b_limit = (v16u8)__msa_fill_b(b_limit_in);
  limit = (v16u8)__msa_fill_b(limit_in);
  thresh = (v16u8)__msa_fill_b(thresh_in);
  temp_src = src - (pitch << 2);
  LD_UB8(temp_src, pitch, p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
               mask, flat);
  VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
  temp_src = src - 3 * pitch;
  ST_UB4(p2, p1, p0, q0, temp_src, pitch);
  temp_src += (4 * pitch);
  ST_UB2(q1, q2, temp_src, pitch);
}
void ff_vp8_v_loop_filter16_msa(uint8_t *src, ptrdiff_t pitch, int b_limit_in,
                                int limit_in, int thresh_in)
{
    uint8_t *temp_src;
    v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
    v16u8 mask, hev, flat, thresh, limit, b_limit;

    b_limit = (v16u8) __msa_fill_b(b_limit_in);
    limit = (v16u8) __msa_fill_b(limit_in);
    thresh = (v16u8) __msa_fill_b(thresh_in);

    /* load vector elements */
    temp_src = src - (pitch << 2);
    LD_UB8(temp_src, pitch, p3, p2, p1, p0, q0, q1, q2, q3);
    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                 hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);

    /* store vector elements */
    temp_src = src - 3 * pitch;
    ST_UB4(p2, p1, p0, q0, temp_src, pitch);
    temp_src += (4 * pitch);
    ST_UB2(q1, q2, temp_src, pitch);
}
void yuv_abgr_convert_msa (JSAMPROW p_in_y, JSAMPROW p_in_cb, JSAMPROW p_in_cr,
                           JSAMPROW p_rgb, JDIMENSION out_width)
{
  int y, cb, cr;
  unsigned int col, num_cols_mul_16 = out_width >> 4;
  unsigned int remaining_wd = out_width & 0xF;
  v16i8 alpha = __msa_ldi_b(0xFF);
  v16i8 const_128 = __msa_ldi_b(128);
  v16u8 out0, out1, out2, out3, input_y = {0};
  v16i8 input_cb, input_cr, out_rgb0, out_rgb1, out_ab0, out_ab1;
  v8i16 y_h0, y_h1, cb_h0, cb_h1, cr_h0, cr_h1;
  v4i32 cb_w0, cb_w1, cb_w2, cb_w3, cr_w0, cr_w1, cr_w2, cr_w3, zero = {0};
  v16i8 out_r0, out_g0, out_b0;

  /* Convert 16 pixels per iteration. */
  for (col = num_cols_mul_16; col--;) {
    input_y = LD_UB(p_in_y);
    input_cb = LD_SB(p_in_cb);
    input_cr = LD_SB(p_in_cr);
    p_in_y += 16;
    p_in_cb += 16;
    p_in_cr += 16;
    input_cb -= const_128;
    input_cr -= const_128;
    UNPCK_UB_SH(input_y, y_h0, y_h1);
    UNPCK_SB_SH(input_cb, cb_h0, cb_h1);
    UNPCK_SB_SH(input_cr, cr_h0, cr_h1);
    CALC_G4_FRM_YUV(y_h0, y_h1, cb_h0, cb_h1, cr_h0, cr_h1, out_g0);
    UNPCK_SH_SW(cr_h0, cr_w0, cr_w1);
    UNPCK_SH_SW(cr_h1, cr_w2, cr_w3);
    CALC_R4_FRM_YUV(y_h0, y_h1, cr_w0, cr_w1, cr_w2, cr_w3, out_r0);
    UNPCK_SH_SW(cb_h0, cb_w0, cb_w1);
    UNPCK_SH_SW(cb_h1, cb_w2, cb_w3);
    CALC_B4_FRM_YUV(y_h0, y_h1, cb_w0, cb_w1, cb_w2, cb_w3, out_b0);
    /* Interleave to A,B,G,R byte order and store 64 bytes. */
    ILVRL_B2_SB(out_r0, out_g0, out_rgb0, out_rgb1);
    ILVRL_B2_SB(out_b0, alpha, out_ab0, out_ab1);
    ILVRL_H2_UB(out_rgb0, out_ab0, out0, out1);
    ILVRL_H2_UB(out_rgb1, out_ab1, out2, out3);
    ST_UB4(out0, out1, out2, out3, p_rgb, 16);
    p_rgb += 16 * 4;
  }

  /* Convert a remaining group of 8 pixels. */
  if (remaining_wd >= 8) {
    uint64_t in_y, in_cb, in_cr;
    v16i8 input_cbcr = {0};

    in_y = LD(p_in_y);
    in_cb = LD(p_in_cb);
    in_cr = LD(p_in_cr);
    p_in_y += 8;
    p_in_cb += 8;
    p_in_cr += 8;
    input_y = (v16u8) __msa_insert_d((v2i64) input_y, 0, in_y);
    input_cbcr = (v16i8) __msa_insert_d((v2i64) input_cbcr, 0, in_cb);
    input_cbcr = (v16i8) __msa_insert_d((v2i64) input_cbcr, 1, in_cr);
    input_cbcr -= const_128;
    y_h0 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) input_y);
    UNPCK_SB_SH(input_cbcr, cb_h0, cr_h0);
    UNPCK_SH_SW(cb_h0, cb_w0, cb_w1);
    UNPCK_SH_SW(cr_h0, cr_w0, cr_w1);
    CALC_R2_FRM_YUV(y_h0, cr_w0, cr_w1, out_r0);
    CALC_G2_FRM_YUV(y_h0, cb_h0, cr_h0, out_g0);
    CALC_B2_FRM_YUV(y_h0, cb_w0, cb_w1, out_b0);
    out_rgb0 = (v16i8) __msa_ilvr_b((v16i8) out_r0, (v16i8) out_g0);
    out_ab0 = (v16i8) __msa_ilvr_b((v16i8) out_b0, alpha);
    ILVRL_H2_UB(out_rgb0, out_ab0, out0, out1);
    ST_UB2(out0, out1, p_rgb, 16);
    p_rgb += 16 * 2;
    remaining_wd -= 8;
  }

  /* Scalar tail for the last 0..7 pixels. */
  for (col = 0; col < remaining_wd; col++) {
    y  = (int) (p_in_y[col]);
    cb = (int) (p_in_cb[col]) - 128;
    cr = (int) (p_in_cr[col]) - 128;

    p_rgb[0] = 0xFF;
    p_rgb[1] = clip_pixel(y + ROUND_POWER_OF_TWO(FIX_1_77200 * cb, 16));
    p_rgb[2] = clip_pixel(y + ROUND_POWER_OF_TWO(((-FIX_0_34414) * cb -
                                                  FIX_0_71414 * cr), 16));
    p_rgb[3] = clip_pixel(y + ROUND_POWER_OF_TWO(FIX_1_40200 * cr, 16));
    p_rgb += 4;
  }
}
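/* The scalar tail above uses libjpeg-style Q16 fixed-point constants (their
 * definitions live elsewhere in the source; the values given here are the
 * conventional ones and are an assumption):
 *   FIX_1_40200 ~ round(1.40200 * 65536) = 91881, etc., with
 *   ROUND_POWER_OF_TWO(v, 16) = (v + 32768) >> 16,
 * so the red channel, for example, is
 *   R = clip(Y + ((91881 * Cr + 32768) >> 16)),
 * i.e. R = Y + 1.402 * Cr rounded to nearest. The byte order written is
 * A, B, G, R, matching the vector path's interleaving. */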
void vpx_lpf_horizontal_8_dual_msa(uint8_t *src, int32_t pitch,
                                   const uint8_t *b_limit0,
                                   const uint8_t *limit0,
                                   const uint8_t *thresh0,
                                   const uint8_t *b_limit1,
                                   const uint8_t *limit1,
                                   const uint8_t *thresh1) {
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
  v16u8 flat, mask, hev, tmp, thresh, b_limit, limit;
  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
  v8u16 p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
  v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l;
  v16u8 zero = { 0 };

  /* load vector elements */
  LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);

  /* pack the two sets of thresholds into the low/high vector halves */
  thresh = (v16u8)__msa_fill_b(*thresh0);
  tmp = (v16u8)__msa_fill_b(*thresh1);
  thresh = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)thresh);

  b_limit = (v16u8)__msa_fill_b(*b_limit0);
  tmp = (v16u8)__msa_fill_b(*b_limit1);
  b_limit = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)b_limit);

  limit = (v16u8)__msa_fill_b(*limit0);
  tmp = (v16u8)__msa_fill_b(*limit1);
  limit = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)limit);

  /* mask and hev */
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
               mask, flat);
  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
                     q1_out);

  if (__msa_test_bz_v(flat)) {
    /* no flat pixels: the 4-tap filter result is final */
    ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
  } else {
    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
               q3_r);
    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);

    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
    ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
    VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);

    /* convert 16 bit output data into 8 bit */
    PCKEV_B4_SH(p2_filt8_l, p2_filt8_r, p1_filt8_l, p1_filt8_r, p0_filt8_l,
                p0_filt8_r, q0_filt8_l, q0_filt8_r, p2_filt8_r, p1_filt8_r,
                p0_filt8_r, q0_filt8_r);
    PCKEV_B2_SH(q1_filt8_l, q1_filt8_r, q2_filt8_l, q2_filt8_r, q1_filt8_r,
                q2_filt8_r);

    /* select 8-tap output where flat, 4-tap output elsewhere */
    p2_out = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
    p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
    p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
    q0_out = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
    q1_out = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
    q2_out = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);

    /* store pixel values */
    src -= 3 * pitch;
    ST_UB4(p2_out, p1_out, p0_out, q0_out, src, pitch);
    src += (4 * pitch);
    ST_UB2(q1_out, q2_out, src, pitch);
    src += (2 * pitch);
  }
}
static void hevc_addblk_32x32_msa(int16_t *coeffs, uint8_t *dst,
                                  int32_t stride)
{
    uint8_t loop_cnt;
    uint8_t *temp_dst = dst;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 dst_r0, dst_l0, dst_r1, dst_l1, dst_r2, dst_l2, dst_r3, dst_l3;
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

    /* Pre-load for next iteration */
    LD_UB2(temp_dst, 16, dst4, dst5);
    temp_dst += stride;
    LD_UB2(temp_dst, 16, dst6, dst7);
    temp_dst += stride;
    LD_SH4(coeffs, 16, in0, in2, in4, in6);
    LD_SH4((coeffs + 8), 16, in1, in3, in5, in7);
    coeffs += 64;

    /* 14 iterations of 2 rows each; the last 4 rows are unrolled below. */
    for (loop_cnt = 14; loop_cnt--;) {
        UNPCK_UB_SH(dst4, dst_r0, dst_l0);
        UNPCK_UB_SH(dst5, dst_r1, dst_l1);
        UNPCK_UB_SH(dst6, dst_r2, dst_l2);
        UNPCK_UB_SH(dst7, dst_r3, dst_l3);

        dst_r0 += in0;
        dst_l0 += in1;
        dst_r1 += in2;
        dst_l1 += in3;
        dst_r2 += in4;
        dst_l2 += in5;
        dst_r3 += in6;
        dst_l3 += in7;

        /* Pre-load for next iteration */
        LD_UB2(temp_dst, 16, dst4, dst5);
        temp_dst += stride;
        LD_UB2(temp_dst, 16, dst6, dst7);
        temp_dst += stride;
        LD_SH4(coeffs, 16, in0, in2, in4, in6);
        LD_SH4((coeffs + 8), 16, in1, in3, in5, in7);
        coeffs += 64;

        CLIP_SH4_0_255(dst_r0, dst_l0, dst_r1, dst_l1);
        CLIP_SH4_0_255(dst_r2, dst_l2, dst_r3, dst_l3);
        PCKEV_B4_UB(dst_l0, dst_r0, dst_l1, dst_r1, dst_l2, dst_r2, dst_l3,
                    dst_r3, dst0, dst1, dst2, dst3);
        ST_UB2(dst0, dst1, dst, 16);
        dst += stride;
        ST_UB2(dst2, dst3, dst, 16);
        dst += stride;
    }

    /* Second-to-last pair of rows. */
    UNPCK_UB_SH(dst4, dst_r0, dst_l0);
    UNPCK_UB_SH(dst5, dst_r1, dst_l1);
    UNPCK_UB_SH(dst6, dst_r2, dst_l2);
    UNPCK_UB_SH(dst7, dst_r3, dst_l3);
    dst_r0 += in0;
    dst_l0 += in1;
    dst_r1 += in2;
    dst_l1 += in3;
    dst_r2 += in4;
    dst_l2 += in5;
    dst_r3 += in6;
    dst_l3 += in7;

    /* Pre-load for next iteration */
    LD_UB2(temp_dst, 16, dst4, dst5);
    temp_dst += stride;
    LD_UB2(temp_dst, 16, dst6, dst7);
    temp_dst += stride;
    LD_SH4(coeffs, 16, in0, in2, in4, in6);
    LD_SH4((coeffs + 8), 16, in1, in3, in5, in7);

    CLIP_SH4_0_255(dst_r0, dst_l0, dst_r1, dst_l1);
    CLIP_SH4_0_255(dst_r2, dst_l2, dst_r3, dst_l3);
    PCKEV_B4_UB(dst_l0, dst_r0, dst_l1, dst_r1, dst_l2, dst_r2, dst_l3,
                dst_r3, dst0, dst1, dst2, dst3);
    ST_UB2(dst0, dst1, dst, 16);
    dst += stride;
    ST_UB2(dst2, dst3, dst, 16);
    dst += stride;

    /* Last pair of rows. */
    UNPCK_UB_SH(dst4, dst_r0, dst_l0);
    UNPCK_UB_SH(dst5, dst_r1, dst_l1);
    UNPCK_UB_SH(dst6, dst_r2, dst_l2);
    UNPCK_UB_SH(dst7, dst_r3, dst_l3);
    dst_r0 += in0;
    dst_l0 += in1;
    dst_r1 += in2;
    dst_l1 += in3;
    dst_r2 += in4;
    dst_l2 += in5;
    dst_r3 += in6;
    dst_l3 += in7;

    CLIP_SH4_0_255(dst_r0, dst_l0, dst_r1, dst_l1);
    CLIP_SH4_0_255(dst_r2, dst_l2, dst_r3, dst_l3);
    PCKEV_B4_UB(dst_l0, dst_r0, dst_l1, dst_r1, dst_l2, dst_r2, dst_l3,
                dst_r3, dst0, dst1, dst2, dst3);
    ST_UB2(dst0, dst1, dst, 16);
    dst += stride;
    ST_UB2(dst2, dst3, dst, 16);
}
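/* A minimal scalar sketch (added for clarity; not part of the original
 * source) of the operation hevc_addblk_32x32_msa implements: add the 32x32
 * residual block to the predicted pixels and clip each result to [0, 255].
 * The vector version above additionally software-pipelines the loads. */
static void hevc_addblk_32x32_ref(int16_t *coeffs, uint8_t *dst,
                                  int32_t stride)
{
    int32_t row, col;

    for (row = 0; row < 32; row++) {
        for (col = 0; col < 32; col++) {
            int32_t val = dst[col] + coeffs[col];
            dst[col] = (uint8_t) (val < 0 ? 0 : (val > 255 ? 255 : val));
        }
        coeffs += 32;
        dst += stride;
    }
}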