void ff_vp8_h_loop_filter16_inner_msa(uint8_t *src, ptrdiff_t pitch,
                                      int32_t e, int32_t i, int32_t h)
{
    v16u8 mask, hev, flat;
    v16u8 thresh, b_limit, limit;
    v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
    v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
    v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;

    /* load 16 rows of 8 pixels straddling the vertical edge and
       transpose them into the 8 column vectors p3..q3 */
    LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
    LD_UB8(src - 4 + (8 * pitch), pitch,
           row8, row9, row10, row11, row12, row13, row14, row15);
    TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                        row8, row9, row10, row11, row12, row13, row14, row15,
                        p3, p2, p1, p0, q0, q1, q2, q3);

    thresh = (v16u8) __msa_fill_b(h);
    b_limit = (v16u8) __msa_fill_b(e);
    limit = (v16u8) __msa_fill_b(i);

    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                 hev, mask, flat);
    VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);

    /* transpose the 4 filtered columns back and store 4 bytes per row */
    ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
    ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
    ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
    ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5);

    src -= 2;
    ST4x8_UB(tmp2, tmp3, src, pitch);
    src += (8 * pitch);
    ST4x8_UB(tmp4, tmp5, src, pitch);
}
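/*
 * The LPF_MASK_HEV macro packs the standard VP8 edge decisions into vector
 * form.  As an informal reference, this is roughly the per-column logic it
 * evaluates -- a sketch, not the macro's actual code; the helpers below are
 * hypothetical and work on one pixel column, where the macro evaluates all
 * 16 columns in parallel.
 */
static int absdiff(int a, int b) { return a > b ? a - b : b - a; }

static void lpf_mask_hev_scalar(int p3, int p2, int p1, int p0,
                                int q0, int q1, int q2, int q3,
                                int limit, int b_limit, int thresh,
                                int *mask, int *hev)
{
    /* filter only where every step between neighbouring pixels is small ... */
    *mask = absdiff(p3, p2) <= limit && absdiff(p2, p1) <= limit &&
            absdiff(p1, p0) <= limit && absdiff(q1, q0) <= limit &&
            absdiff(q2, q1) <= limit && absdiff(q3, q2) <= limit &&
            /* ... and the edge step itself stays under the block limit */
            absdiff(p0, q0) * 2 + absdiff(p1, q1) / 2 <= b_limit;
    /* high edge variance: take the stronger filtering path */
    *hev = absdiff(p1, p0) > thresh || absdiff(q1, q0) > thresh;
}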
void vp8_short_fdct8x4_msa(int16_t *input, int16_t *output, int32_t pitch) {
  v8i16 in0, in1, in2, in3;
  v8i16 temp0, temp1, tmp0, tmp1;
  v8i16 const0, const1, const2;
  v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 };
  v8i16 zero = { 0 };
  v4i32 vec0_w, vec1_w, vec2_w, vec3_w;

  LD_SH4(input, pitch / 2, in0, in1, in2, in3);
  TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3);

  /* first pass (rows): inputs are scaled up by 8 before the butterfly */
  BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);
  SLLI_4V(temp0, temp1, in1, in3, 3);
  in0 = temp0 + temp1;
  in2 = temp0 - temp1;
  SET_DOTP_VALUES(coeff, 0, 1, 2, const1, const2);
  temp0 = __msa_splati_h(coeff, 3);
  vec1_w = (v4i32)__msa_ilvev_h(zero, temp0);
  coeff = __msa_ilvl_h(zero, coeff);
  vec3_w = __msa_splati_w((v4i32)coeff, 0);
  ILVRL_H2_SH(in3, in1, tmp1, tmp0);
  vec0_w = vec1_w;
  vec2_w = vec3_w;
  DPADD_SH4_SW(tmp1, tmp0, tmp1, tmp0, const1, const1, const2, const2,
               vec0_w, vec1_w, vec2_w, vec3_w);
  SRA_4V(vec1_w, vec0_w, vec3_w, vec2_w, 12);
  PCKEV_H2_SH(vec1_w, vec0_w, vec3_w, vec2_w, in1, in3);
  TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3);

  /* second pass (columns): (x + 7) >> 4 rounding on the even outputs */
  BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);
  in0 = temp0 + temp1 + 7;
  in2 = temp0 - temp1 + 7;
  in0 >>= 4;
  in2 >>= 4;
  SPLATI_W2_SW(coeff, 2, vec3_w, vec1_w);
  vec3_w += vec1_w;
  vec1_w = __msa_splati_w((v4i32)coeff, 1);
  const0 = RET_1_IF_NZERO_H(in3);
  ILVRL_H2_SH(in3, in1, tmp1, tmp0);
  vec0_w = vec1_w;
  vec2_w = vec3_w;
  DPADD_SH4_SW(tmp1, tmp0, tmp1, tmp0, const1, const1, const2, const2,
               vec0_w, vec1_w, vec2_w, vec3_w);
  SRA_4V(vec1_w, vec0_w, vec3_w, vec2_w, 16);
  PCKEV_H2_SH(vec1_w, vec0_w, vec3_w, vec2_w, in1, in3);
  in1 += const0;
  PCKEV_D2_SH(in1, in0, in3, in2, temp0, temp1);
  ST_SH2(temp0, temp1, output, 8);
  PCKOD_D2_SH(in1, in0, in3, in2, in0, in2);
  ST_SH2(in0, in2, output + 16, 8);
}
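/* The vector code mirrors the scalar VP8 forward DCT: the constants 12000
 * and 51000 (= 25000 + 26000, formed above by "vec3_w += vec1_w") are the
 * scalar rounding terms, and RET_1_IF_NZERO_H plus "in1 += const0" reproduce
 * the scalar "+ (d1 != 0)" correction.  For orientation, a sketch of the
 * second (column) pass following the reference vp8_short_fdct4x4_c; the
 * helper name is illustrative. */
static void fdct4_col_pass2_scalar(const int16_t *ip, int16_t *op) {
  const int a1 = ip[0] + ip[12];
  const int b1 = ip[4] + ip[8];
  const int c1 = ip[4] - ip[8];
  const int d1 = ip[0] - ip[12];

  op[0] = (int16_t)((a1 + b1 + 7) >> 4);
  op[8] = (int16_t)((a1 - b1 + 7) >> 4);
  /* the "(d1 != 0)" term is what RET_1_IF_NZERO_H provides in vector form */
  op[4] = (int16_t)(((c1 * 2217 + d1 * 5352 + 12000) >> 16) + (d1 != 0));
  op[12] = (int16_t)((d1 * 2217 - c1 * 5352 + 51000) >> 16);
}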
void vp9_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
                                 const uint8_t *b_limit0_ptr,
                                 const uint8_t *limit0_ptr,
                                 const uint8_t *thresh0_ptr,
                                 const uint8_t *b_limit1_ptr,
                                 const uint8_t *limit1_ptr,
                                 const uint8_t *thresh1_ptr) {
  v16u8 mask, hev, flat;
  v16u8 thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
  v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;

  LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13,
         row14, row15);

  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
                      row9, row10, row11, row12, row13, row14, row15, p3, p2,
                      p1, p0, q0, q1, q2, q3);

  /* pack both parameter sets into one vector:
     the low 8 bytes filter edge 0, the high 8 bytes edge 1 */
  thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);
  thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);
  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);

  b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr);
  b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr);
  b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);

  limit0 = (v16u8)__msa_fill_b(*limit0_ptr);
  limit1 = (v16u8)__msa_fill_b(*limit1_ptr);
  limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);

  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
               mask, flat);
  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);

  ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
  ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5);

  src -= 2;
  ST4x8_UB(tmp2, tmp3, src, pitch);
  src += (8 * pitch);
  ST4x8_UB(tmp4, tmp5, src, pitch);
}
void vp9_lpf_vertical_4_msa(uint8_t *src, int32_t pitch,
                            const uint8_t *b_limit_ptr,
                            const uint8_t *limit_ptr,
                            const uint8_t *thresh_ptr, int32_t count) {
  v16u8 mask, hev, flat, limit, thresh, b_limit;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v8i16 vec0, vec1, vec2, vec3;

  (void)count;

  LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3);

  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
  limit = (v16u8)__msa_fill_b(*limit_ptr);

  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1,
                     q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
               mask, flat);
  VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
  ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1);
  ILVRL_H2_SH(vec1, vec0, vec2, vec3);

  src -= 2;
  ST4x4_UB(vec2, vec2, 0, 1, 2, 3, src, pitch);
  src += 4 * pitch;
  ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
}
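/* VP9_LPF_FILTER4_8W applies the standard 4-tap loop-filter update to
 * p1..q1 under mask/hev control.  A scalar sketch of that update, following
 * the scalar filter4 logic in libvpx (helper names are illustrative; pixels
 * are processed in sign-flipped form): */
static int8_t sc_clamp(int v) { /* clamp to the signed 8-bit range */
  return (int8_t)(v < -128 ? -128 : (v > 127 ? 127 : v));
}

static void lpf_filter4_scalar(int mask, int hev, uint8_t *op1, uint8_t *op0,
                               uint8_t *oq0, uint8_t *oq1) {
  const int8_t ps1 = (int8_t)(*op1 ^ 0x80), ps0 = (int8_t)(*op0 ^ 0x80);
  const int8_t qs0 = (int8_t)(*oq0 ^ 0x80), qs1 = (int8_t)(*oq1 ^ 0x80);
  int filt, filter1, filter2;

  filt = hev ? sc_clamp(ps1 - qs1) : 0;
  filt = mask ? sc_clamp(filt + 3 * (qs0 - ps0)) : 0;

  filter1 = sc_clamp(filt + 4) >> 3; /* applied to the q side */
  filter2 = sc_clamp(filt + 3) >> 3; /* applied to the p side */

  *oq0 = (uint8_t)(sc_clamp(qs0 - filter1) ^ 0x80);
  *op0 = (uint8_t)(sc_clamp(ps0 + filter2) ^ 0x80);

  /* the outer pixels move only where edge variance is low */
  filt = hev ? 0 : (filter1 + 1) >> 1;
  *oq1 = (uint8_t)(sc_clamp(qs1 - filt) ^ 0x80);
  *op1 = (uint8_t)(sc_clamp(ps1 + filt) ^ 0x80);
}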
int32_t vp8_block_error_msa(int16_t *coeff_ptr, int16_t *dq_coeff_ptr) {
  int32_t err = 0;
  uint32_t loop_cnt;
  v8i16 coeff, dq_coeff, coeff0, coeff1;
  v4i32 diff0, diff1;
  v2i64 err0 = { 0 };
  v2i64 err1 = { 0 };

  /* two iterations of 8 coefficients cover the 16 coefficients of a block */
  for (loop_cnt = 2; loop_cnt--;) {
    coeff = LD_SH(coeff_ptr);
    dq_coeff = LD_SH(dq_coeff_ptr);
    ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1);
    HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
    DPADD_SD2_SD(diff0, diff1, err0, err1);
    coeff_ptr += 8;
    dq_coeff_ptr += 8;
  }

  /* horizontal reduction of the two 64-bit accumulators */
  err0 += __msa_splati_d(err0, 1);
  err1 += __msa_splati_d(err1, 1);
  err = __msa_copy_s_d(err0, 0);
  err += __msa_copy_s_d(err1, 0);

  return err;
}
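/* What the kernel above computes is simply the sum of squared differences
 * between the original and dequantized coefficients of one block.  A minimal
 * scalar reference (helper name illustrative): */
static int32_t block_error_scalar(const int16_t *coeff,
                                  const int16_t *dq_coeff) {
  int64_t err = 0; /* 64-bit accumulator, as in the vector version */
  int i;

  for (i = 0; i < 16; i++) {
    const int32_t diff = coeff[i] - dq_coeff[i];
    err += (int64_t)diff * diff;
  }
  return (int32_t)err;
}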
void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr,
                        int32_t src_stride) {
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
  v8i16 stp21, stp22, stp23, stp24, stp25, stp26, stp30;
  v8i16 stp31, stp32, stp33, stp34, stp35, stp36, stp37;
  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, cnst0, cnst1, cnst4, cnst5;
  v8i16 coeff = { cospi_16_64, -cospi_16_64, cospi_8_64,  cospi_24_64,
                  -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 };
  v8i16 coeff1 = { cospi_2_64,  cospi_30_64, cospi_14_64, cospi_18_64,
                   cospi_10_64, cospi_22_64, cospi_6_64,  cospi_26_64 };
  v8i16 coeff2 = {
    -cospi_2_64, -cospi_10_64, -cospi_18_64, -cospi_26_64, 0, 0, 0, 0
  };

  LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9,
          in10, in11, in12, in13, in14, in15);
  SLLI_4V(in0, in1, in2, in3, 2);
  SLLI_4V(in4, in5, in6, in7, 2);
  SLLI_4V(in8, in9, in10, in11, 2);
  SLLI_4V(in12, in13, in14, in15, 2);
  ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);
  ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);
  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
                tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
  ST_SH8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp_ptr, 32);
  SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12);
  SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8);

  tmp_ptr += 16;

  /* stp 1 */
  ILVL_H2_SH(in10, in13, in11, in12, vec2, vec4);
  ILVR_H2_SH(in10, in13, in11, in12, vec3, vec5);

  cnst4 = __msa_splati_h(coeff, 0);
  stp25 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst4);

  cnst5 = __msa_splati_h(coeff, 1);
  cnst5 = __msa_ilvev_h(cnst5, cnst4);
  stp22 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst5);
  stp24 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst4);
  stp23 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst5);

  /* stp2 */
  BUTTERFLY_4(in8, in9, stp22, stp23, stp30, stp31, stp32, stp33);
  BUTTERFLY_4(in15, in14, stp25, stp24, stp37, stp36, stp35, stp34);

  ILVL_H2_SH(stp36, stp31, stp35, stp32, vec2, vec4);
  ILVR_H2_SH(stp36, stp31, stp35, stp32, vec3, vec5);

  SPLATI_H2_SH(coeff, 2, 3, cnst0, cnst1);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);
  stp26 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst0);

  cnst0 = __msa_splati_h(coeff, 4);
  cnst1 = __msa_ilvev_h(cnst1, cnst0);
  stp21 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst1);

  BUTTERFLY_4(stp30, stp37, stp26, stp21, in8, in15, in14, in9);
  ILVRL_H2_SH(in15, in8, vec1, vec0);
  SPLATI_H2_SH(coeff1, 0, 1, cnst0, cnst1);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);

  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr);

  cnst0 = __msa_splati_h(coeff2, 0);
  cnst0 = __msa_ilvev_h(cnst1, cnst0);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 224);

  ILVRL_H2_SH(in14, in9, vec1, vec0);
  SPLATI_H2_SH(coeff1, 2, 3, cnst0, cnst1);
  cnst1 = __msa_ilvev_h(cnst1, cnst0);

  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1);
  ST_SH(in8, tmp_ptr + 128);

  cnst1 = __msa_splati_h(coeff2, 2);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 96);

  SPLATI_H2_SH(coeff, 2, 5, cnst0, cnst1);
  cnst1 = __msa_ilvev_h(cnst1, cnst0);
  stp25 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);

  cnst1 = __msa_splati_h(coeff, 3);
  cnst1 = __msa_ilvev_h(cnst0, cnst1);
  stp22 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);

  /* stp4 */
  ADD2(stp34, stp25, stp33, stp22, in13, in10);

  ILVRL_H2_SH(in13, in10, vec1, vec0);
  SPLATI_H2_SH(coeff1, 4, 5, cnst0, cnst1);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 64);

  cnst0 = __msa_splati_h(coeff2, 1);
  cnst0 = __msa_ilvev_h(cnst1, cnst0);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 160);

  SUB2(stp34, stp25, stp33, stp22, in12, in11);
  ILVRL_H2_SH(in12, in11, vec1, vec0);
  SPLATI_H2_SH(coeff1, 6, 7, cnst0, cnst1);
  cnst1 = __msa_ilvev_h(cnst1, cnst0);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1);
  ST_SH(in8, tmp_ptr + 192);

  cnst1 = __msa_splati_h(coeff2, 3);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 32);
}
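/* Each DOT_SHIFT_RIGHT_PCK_H above is, per output coefficient, a two-tap dot
 * product against a pair of interleaved cosine constants followed by a
 * rounding right shift by DCT_CONST_BITS (14) -- the vector analogue of the
 * scalar fdct_round_shift().  A one-element sketch (name illustrative): */
static int16_t dot_shift_right_scalar(int16_t a, int16_t b, int16_t c0,
                                      int16_t c1) {
  const int32_t sum = a * c0 + b * c1;
  return (int16_t)((sum + (1 << 13)) >> 14); /* ROUND_POWER_OF_TWO(sum, 14) */
}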
void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
                                 const uint8_t *b_limit0,
                                 const uint8_t *limit0,
                                 const uint8_t *thresh0,
                                 const uint8_t *b_limit1,
                                 const uint8_t *limit1,
                                 const uint8_t *thresh1) {
  uint8_t *temp_src;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 p1_out, p0_out, q0_out, q1_out;
  v16u8 flat, mask, hev, thresh, b_limit, limit;
  v16u8 row4, row5, row6, row7, row12, row13, row14, row15;
  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
  v8u16 p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
  v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l;
  v16u8 zero = { 0 };
  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;

  temp_src = src - 4;

  LD_UB8(temp_src, pitch, p0, p1, p2, p3, row4, row5, row6, row7);
  temp_src += (8 * pitch);
  LD_UB8(temp_src, pitch, q3, q2, q1, q0, row12, row13, row14, row15);

  /* transpose 16x8 matrix into 8x16 */
  TRANSPOSE16x8_UB_UB(p0, p1, p2, p3, row4, row5, row6, row7, q3, q2, q1, q0,
                      row12, row13, row14, row15, p3, p2, p1, p0, q0, q1, q2,
                      q3);

  thresh = (v16u8)__msa_fill_b(*thresh0);
  vec0 = (v8i16)__msa_fill_b(*thresh1);
  thresh = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)thresh);

  b_limit = (v16u8)__msa_fill_b(*b_limit0);
  vec0 = (v8i16)__msa_fill_b(*b_limit1);
  b_limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)b_limit);

  limit = (v16u8)__msa_fill_b(*limit0);
  vec0 = (v8i16)__msa_fill_b(*limit1);
  limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)limit);

  /* mask and hev */
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
               mask, flat);
  /* flat4 */
  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
  /* filter4 */
  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
                     q1_out);

  if (__msa_test_bz_v(flat)) {
    ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec2, vec3);
    ILVL_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec4, vec5);

    src -= 2;
    ST4x8_UB(vec2, vec3, src, pitch);
    src += 8 * pitch;
    ST4x8_UB(vec4, vec5, src, pitch);
  } else {
    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
               q3_r);
    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);

    ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
    ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);

    /* filter8 */
    VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);

    /* convert 16 bit output data into 8 bit */
    PCKEV_B4_SH(p2_filt8_l, p2_filt8_r, p1_filt8_l, p1_filt8_r, p0_filt8_l,
                p0_filt8_r, q0_filt8_l, q0_filt8_r, p2_filt8_r, p1_filt8_r,
                p0_filt8_r, q0_filt8_r);
    PCKEV_B2_SH(q1_filt8_l, q1_filt8_r, q2_filt8_l, q2_filt8_r, q1_filt8_r,
                q2_filt8_r);

    /* store pixel values */
    p2 = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
    p1 = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
    p0 = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
    q0 = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
    q1 = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
    q2 = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);

    ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec3, vec4);
    ILVL_B2_SH(p1, p2, q0, p0, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec6, vec7);
    ILVRL_B2_SH(q2, q1, vec2, vec5);

    src -= 3;
    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec2, 0, src + 4, pitch);
    src += (4 * pitch);
    ST4x4_UB(vec4, vec4, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec2, 4, src + 4, pitch);
    src += (4 * pitch);
    ST4x4_UB(vec6, vec6, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec5, 0, src + 4, pitch);
    src += (4 * pitch);
    ST4x4_UB(vec7, vec7, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec5, 4, src + 4, pitch);
  }
}
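/* VP9_FILTER8 computes the smoothed p2..q2 values that are blended in with
 * __msa_bmnz_v wherever the "flat" mask is set.  A scalar sketch of those
 * taps, following the scalar filter8 definition in libvpx (helper name
 * illustrative): */
#define RND_SHIFT3(x) (((x) + 4) >> 3) /* ROUND_POWER_OF_TWO(x, 3) */

static void filter8_scalar(uint8_t p3, uint8_t p2, uint8_t p1, uint8_t p0,
                           uint8_t q0, uint8_t q1, uint8_t q2, uint8_t q3,
                           uint8_t *op2, uint8_t *op1, uint8_t *op0,
                           uint8_t *oq0, uint8_t *oq1, uint8_t *oq2) {
  *op2 = (uint8_t)RND_SHIFT3(p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0);
  *op1 = (uint8_t)RND_SHIFT3(p3 + p3 + p2 + 2 * p1 + p0 + q0 + q1);
  *op0 = (uint8_t)RND_SHIFT3(p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2);
  *oq0 = (uint8_t)RND_SHIFT3(p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3);
  *oq1 = (uint8_t)RND_SHIFT3(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3);
  *oq2 = (uint8_t)RND_SHIFT3(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3);
}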
void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
                            const uint8_t *b_limit_ptr,
                            const uint8_t *limit_ptr,
                            const uint8_t *thresh_ptr) {
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 p1_out, p0_out, q0_out, q1_out;
  v16u8 flat, mask, hev, thresh, b_limit, limit;
  v8u16 p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r;
  v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r;
  v16u8 zero = { 0 };
  v8i16 vec0, vec1, vec2, vec3, vec4;

  /* load vector elements */
  LD_UB8(src - 4, pitch, p3, p2, p1, p0, q0, q1, q2, q3);

  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1,
                     q2, q3);

  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
  limit = (v16u8)__msa_fill_b(*limit_ptr);

  /* mask and hev */
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
               mask, flat);
  /* flat4 */
  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
  /* filter4 */
  VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
                     q1_out);

  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);

  if (__msa_test_bz_v(flat)) {
    /* store 4 pixels p1 - q1 */
    ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec2, vec3);

    src -= 2;
    ST4x4_UB(vec2, vec2, 0, 1, 2, 3, src, pitch);
    src += 4 * pitch;
    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
  } else {
    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
               q3_r);
    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);

    /* convert 16 bit output data into 8 bit */
    PCKEV_B4_SH(p2_filt8_r, p2_filt8_r, p1_filt8_r, p1_filt8_r, p0_filt8_r,
                p0_filt8_r, q0_filt8_r, q0_filt8_r, p2_filt8_r, p1_filt8_r,
                p0_filt8_r, q0_filt8_r);
    PCKEV_B2_SH(q1_filt8_r, q1_filt8_r, q2_filt8_r, q2_filt8_r, q1_filt8_r,
                q2_filt8_r);

    /* store pixel values */
    p2 = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);
    p1 = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);
    p0 = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);
    q0 = __msa_bmnz_v(q0_out, (v16u8)q0_filt8_r, flat);
    q1 = __msa_bmnz_v(q1_out, (v16u8)q1_filt8_r, flat);
    q2 = __msa_bmnz_v(q2, (v16u8)q2_filt8_r, flat);

    /* store 6 pixels p2 - q2 */
    ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);
    ILVRL_H2_SH(vec1, vec0, vec2, vec3);
    vec4 = (v8i16)__msa_ilvr_b((v16i8)q2, (v16i8)q1);

    src -= 3;
    ST4x4_UB(vec2, vec2, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec4, 0, src + 4, pitch);
    src += (4 * pitch);
    ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
    ST2x4_UB(vec4, 4, src + 4, pitch);
  }
}
int32_t vp8_mbblock_error_msa(MACROBLOCK *mb, int32_t dc) {
  BLOCK *be;
  BLOCKD *bd;
  int16_t *coeff_ptr, *dq_coeff_ptr;
  int32_t err = 0;
  uint32_t loop_cnt;
  v8i16 coeff, coeff0, coeff1, coeff2, coeff3, coeff4;
  v8i16 dq_coeff, dq_coeff2, dq_coeff3, dq_coeff4;
  v4i32 diff0, diff1;
  v2i64 err0, err1;
  v16u8 zero = { 0 };
  v16u8 mask0 = (v16u8)__msa_ldi_b(255);

  /* when dc == 1, zero the first 32-bit lane of the mask so the DC
     coefficient of each block drops out of the error sum */
  if (1 == dc) {
    mask0 = (v16u8)__msa_insve_w((v4i32)mask0, 0, (v4i32)zero);
  }

  /* two 4x4 blocks per iteration, 16 coefficients per block */
  for (loop_cnt = 0; loop_cnt < 8; loop_cnt++) {
    be = &mb->block[2 * loop_cnt];
    bd = &mb->e_mbd.block[2 * loop_cnt];
    coeff_ptr = be->coeff;
    dq_coeff_ptr = bd->dqcoeff;
    coeff = LD_SH(coeff_ptr);
    dq_coeff = LD_SH(dq_coeff_ptr);
    coeff_ptr += 8;
    dq_coeff_ptr += 8;
    coeff2 = LD_SH(coeff_ptr);
    dq_coeff2 = LD_SH(dq_coeff_ptr);
    be = &mb->block[2 * loop_cnt + 1];
    bd = &mb->e_mbd.block[2 * loop_cnt + 1];
    coeff_ptr = be->coeff;
    dq_coeff_ptr = bd->dqcoeff;
    coeff3 = LD_SH(coeff_ptr);
    dq_coeff3 = LD_SH(dq_coeff_ptr);
    coeff_ptr += 8;
    dq_coeff_ptr += 8;
    coeff4 = LD_SH(coeff_ptr);
    dq_coeff4 = LD_SH(dq_coeff_ptr);

    ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1);
    HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
    diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0);
    DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);

    ILVRL_H2_SH(coeff2, dq_coeff2, coeff0, coeff1);
    HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
    DPADD_SD2_SD(diff0, diff1, err0, err1);
    err0 += __msa_splati_d(err0, 1);
    err1 += __msa_splati_d(err1, 1);
    err += __msa_copy_s_d(err0, 0);
    err += __msa_copy_s_d(err1, 0);

    ILVRL_H2_SH(coeff3, dq_coeff3, coeff0, coeff1);
    HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
    diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0);
    DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);

    ILVRL_H2_SH(coeff4, dq_coeff4, coeff0, coeff1);
    HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
    DPADD_SD2_SD(diff0, diff1, err0, err1);
    err0 += __msa_splati_d(err0, 1);
    err1 += __msa_splati_d(err1, 1);
    err += __msa_copy_s_d(err0, 0);
    err += __msa_copy_s_d(err1, 0);
  }

  return err;
}
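/* The mask0 trick above matches the scalar reference behaviour: with dc == 1
 * the DC coefficient (index 0) of each block is excluded from the sum.  A
 * scalar sketch of the whole computation (helper name illustrative): */
static int32_t mbblock_error_scalar(MACROBLOCK *mb, int32_t dc) {
  int32_t err = 0;
  int i, j;

  for (i = 0; i < 16; i++) { /* the 16 luma blocks of the macroblock */
    const int16_t *coeff = mb->block[i].coeff;
    const int16_t *dqcoeff = mb->e_mbd.block[i].dqcoeff;

    for (j = dc; j < 16; j++) { /* dc == 1 skips the DC term */
      const int32_t diff = coeff[j] - dqcoeff[j];
      err += diff * diff;
    }
  }
  return err;
}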
int32_t vp8_mbuverror_msa(MACROBLOCK *mb) {
  BLOCK *be;
  BLOCKD *bd;
  int16_t *coeff_ptr, *dq_coeff_ptr;
  int32_t err = 0;
  uint32_t loop_cnt;
  v8i16 coeff, coeff0, coeff1, coeff2, coeff3, coeff4;
  v8i16 dq_coeff, dq_coeff2, dq_coeff3, dq_coeff4;
  v4i32 diff0, diff1;
  v2i64 err0, err1, err_dup0, err_dup1;

  /* blocks 16..23 are the chroma (U and V) blocks */
  for (loop_cnt = 16; loop_cnt < 24; loop_cnt += 2) {
    be = &mb->block[loop_cnt];
    bd = &mb->e_mbd.block[loop_cnt];
    coeff_ptr = be->coeff;
    dq_coeff_ptr = bd->dqcoeff;
    coeff = LD_SH(coeff_ptr);
    dq_coeff = LD_SH(dq_coeff_ptr);
    coeff_ptr += 8;
    dq_coeff_ptr += 8;
    coeff2 = LD_SH(coeff_ptr);
    dq_coeff2 = LD_SH(dq_coeff_ptr);
    be = &mb->block[loop_cnt + 1];
    bd = &mb->e_mbd.block[loop_cnt + 1];
    coeff_ptr = be->coeff;
    dq_coeff_ptr = bd->dqcoeff;
    coeff3 = LD_SH(coeff_ptr);
    dq_coeff3 = LD_SH(dq_coeff_ptr);
    coeff_ptr += 8;
    dq_coeff_ptr += 8;
    coeff4 = LD_SH(coeff_ptr);
    dq_coeff4 = LD_SH(dq_coeff_ptr);

    ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1);
    HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
    DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);

    ILVRL_H2_SH(coeff2, dq_coeff2, coeff0, coeff1);
    HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
    DPADD_SD2_SD(diff0, diff1, err0, err1);

    err_dup0 = __msa_splati_d(err0, 1);
    err_dup1 = __msa_splati_d(err1, 1);
    ADD2(err0, err_dup0, err1, err_dup1, err0, err1);
    err += __msa_copy_s_d(err0, 0);
    err += __msa_copy_s_d(err1, 0);

    ILVRL_H2_SH(coeff3, dq_coeff3, coeff0, coeff1);
    HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
    DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);

    ILVRL_H2_SH(coeff4, dq_coeff4, coeff0, coeff1);
    HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
    DPADD_SD2_SD(diff0, diff1, err0, err1);

    err_dup0 = __msa_splati_d(err0, 1);
    err_dup1 = __msa_splati_d(err1, 1);
    ADD2(err0, err_dup0, err1, err_dup1, err0, err1);
    err += __msa_copy_s_d(err0, 0);
    err += __msa_copy_s_d(err1, 0);
  }

  return err;
}
static void mbloop_filter_vertical_edge_uv_msa(uint8_t *src_u, uint8_t *src_v,
                                               int32_t pitch,
                                               const uint8_t b_limit_in,
                                               const uint8_t limit_in,
                                               const uint8_t thresh_in) {
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 mask, hev, flat, thresh, limit, b_limit;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
  v16u8 row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;

  b_limit = (v16u8)__msa_fill_b(b_limit_in);
  limit = (v16u8)__msa_fill_b(limit_in);
  thresh = (v16u8)__msa_fill_b(thresh_in);

  /* U rows fill the low half of each vector, V rows the high half */
  LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(src_v - 4, pitch, row8, row9, row10, row11, row12, row13, row14,
         row15);
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
                      row9, row10, row11, row12, row13, row14, row15, p3, p2,
                      p1, p0, q0, q1, q2, q3);

  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
               mask, flat);
  VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);

  /* transpose the six filtered columns back for 6-pixel-wide stores */
  ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
  ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
  ILVRL_B2_SH(q2, q1, tmp2, tmp5);

  src_u -= 3;
  VP8_ST6x1_UB(tmp3, 0, tmp2, 0, src_u, 4);
  src_u += pitch;
  VP8_ST6x1_UB(tmp3, 1, tmp2, 1, src_u, 4);
  src_u += pitch;
  VP8_ST6x1_UB(tmp3, 2, tmp2, 2, src_u, 4);
  src_u += pitch;
  VP8_ST6x1_UB(tmp3, 3, tmp2, 3, src_u, 4);
  src_u += pitch;
  VP8_ST6x1_UB(tmp4, 0, tmp2, 4, src_u, 4);
  src_u += pitch;
  VP8_ST6x1_UB(tmp4, 1, tmp2, 5, src_u, 4);
  src_u += pitch;
  VP8_ST6x1_UB(tmp4, 2, tmp2, 6, src_u, 4);
  src_u += pitch;
  VP8_ST6x1_UB(tmp4, 3, tmp2, 7, src_u, 4);

  src_v -= 3;
  VP8_ST6x1_UB(tmp6, 0, tmp5, 0, src_v, 4);
  src_v += pitch;
  VP8_ST6x1_UB(tmp6, 1, tmp5, 1, src_v, 4);
  src_v += pitch;
  VP8_ST6x1_UB(tmp6, 2, tmp5, 2, src_v, 4);
  src_v += pitch;
  VP8_ST6x1_UB(tmp6, 3, tmp5, 3, src_v, 4);
  src_v += pitch;
  VP8_ST6x1_UB(tmp7, 0, tmp5, 4, src_v, 4);
  src_v += pitch;
  VP8_ST6x1_UB(tmp7, 1, tmp5, 5, src_v, 4);
  src_v += pitch;
  VP8_ST6x1_UB(tmp7, 2, tmp5, 6, src_v, 4);
  src_v += pitch;
  VP8_ST6x1_UB(tmp7, 3, tmp5, 7, src_v, 4);
}
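/* Where the mask passes but hev does not, VP8_MBFILTER applies the wide
 * three-pixel update on each side of the edge, feeding roughly 27/128,
 * 18/128 and 9/128 of the filter value to the inner, middle and outer
 * pixels.  A scalar sketch of that stage, after the scalar vp8 mbfilter
 * (helper names illustrative; w is the clamped, masked filter value and
 * pixels are in sign-flipped (^ 0x80) form): */
static int8_t mb_clamp(int v) { /* clamp to the signed 8-bit range */
  return (int8_t)(v < -128 ? -128 : (v > 127 ? 127 : v));
}

static void mbfilter_wide_scalar(int w, int8_t *ps2, int8_t *ps1, int8_t *ps0,
                                 int8_t *qs0, int8_t *qs1, int8_t *qs2) {
  int u;

  u = mb_clamp((w * 27 + 63) >> 7); /* ~27/128 at the edge */
  *qs0 = mb_clamp(*qs0 - u);
  *ps0 = mb_clamp(*ps0 + u);
  u = mb_clamp((w * 18 + 63) >> 7); /* ~18/128 one pixel out */
  *qs1 = mb_clamp(*qs1 - u);
  *ps1 = mb_clamp(*ps1 + u);
  u = mb_clamp((w * 9 + 63) >> 7); /* ~9/128 two pixels out */
  *qs2 = mb_clamp(*qs2 - u);
  *ps2 = mb_clamp(*ps2 + u);
}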
void ff_vp8_h_loop_filter16_msa(uint8_t *src, ptrdiff_t pitch, int b_limit_in,
                                int limit_in, int thresh_in)
{
    uint8_t *temp_src;
    v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
    v16u8 mask, hev, flat, thresh, limit, b_limit;
    v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
    v16u8 row9, row10, row11, row12, row13, row14, row15;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;

    b_limit = (v16u8) __msa_fill_b(b_limit_in);
    limit = (v16u8) __msa_fill_b(limit_in);
    thresh = (v16u8) __msa_fill_b(thresh_in);

    temp_src = src - 4;
    LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
    temp_src += (8 * pitch);
    LD_UB8(temp_src, pitch,
           row8, row9, row10, row11, row12, row13, row14, row15);
    TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                        row8, row9, row10, row11, row12, row13, row14, row15,
                        p3, p2, p1, p0, q0, q1, q2, q3);

    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                 hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);

    ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
    ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
    ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
    ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
    ILVRL_B2_SH(q2, q1, tmp2, tmp5);

    temp_src = src - 3;
    VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);
    temp_src += pitch;
    VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4);
}