/* Forward 4x4 DCT of the residual between two pixel blocks (AltiVec path).
 *
 * dct    : output 4x4 block of 16-bit transform coefficients
 * pix1   : first pixel block (e.g. source), row stride i_pix1 bytes
 * pix2   : second pixel block (e.g. prediction), row stride i_pix2 bytes
 *
 * Structure: load the per-row differences into four vectors, then apply the
 * 1-D DCT butterfly twice with a transpose in between (rows, then columns),
 * and store 8 bytes (one row of four int16 coefficients) per output row.
 *
 * NOTE(review): PREP_DIFF / PREP_STORE8 / VEC_DIFF_H / VEC_DCT /
 * VEC_TRANSPOSE_4 / VEC_STORE8 are project macros defined elsewhere;
 * VEC_DIFF_H presumably advances pix1/pix2 by their strides as a side
 * effect (it is called four times with the same arguments) — confirm
 * against the macro definitions.
 */
void x264_sub4x4_dct_altivec( int16_t dct[4][4], uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    PREP_DIFF;          /* scratch setup for VEC_DIFF_H (defined elsewhere) */
    PREP_STORE8;        /* scratch setup for VEC_STORE8 (defined elsewhere) */
    vec_s16_t dct0v, dct1v, dct2v, dct3v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v;

    /* One difference row (width 4) per vector. */
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct0v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct1v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct2v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct3v );

    /* 2-D DCT = 1-D DCT on rows, transpose, 1-D DCT on columns, transpose
     * back so the coefficients land in row order for the stores below. */
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v, dct0v, dct1v, dct2v, dct3v );
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v, dct0v, dct1v, dct2v, dct3v );

    /* 8 bytes = four int16 coefficients = one output row. */
    VEC_STORE8( dct0v, dct[0] );
    VEC_STORE8( dct1v, dct[1] );
    VEC_STORE8( dct2v, dct[2] );
    VEC_STORE8( dct3v, dct[3] );
}
/* 8x8 chroma plane (plane/"P") intra prediction, AltiVec path.
 *
 * src points at the top-left pixel of the block inside the decoded-frame
 * buffer; neighbours are addressed at negative offsets (row above at
 * -FDEC_STRIDE, column to the left at -1).
 *
 * Scalar part: derive the horizontal (H) and vertical (V) gradients from
 * the border pixels, then the plane parameters
 *   a = 16 * (bottom-left + top-right neighbour)
 *   b = (17*H + 16) >> 5,  c = (17*V + 16) >> 5
 * and the first predicted sample i00 = a - 3b - 3c + 16.
 *
 * Vector part: a row of predictions is i00 + x*b (x = 0..7), computed once
 * via vec_mladd with the 0..7 induction vector; each subsequent row adds c.
 * Values are >>5 with arithmetic shift, saturated/packed to unsigned bytes,
 * and 8 bytes are stored per row.
 *
 * NOTE(review): CV, PREP_STORE8, VEC_STORE8 and the vec_s16_u union are
 * project macros/types defined elsewhere — semantics assumed from usage.
 */
static void predict_8x8c_p_altivec( uint8_t *src )
{
    int H = 0, V = 0;

    /* Weighted sums of border-pixel differences, mirrored around the
     * block centre (weights 1..4). */
    for( int i = 0; i < 4; i++ )
    {
        H += ( i + 1 ) * ( src[4+i - FDEC_STRIDE] - src[2 - i -FDEC_STRIDE] );
        V += ( i + 1 ) * ( src[-1 +(i+4)*FDEC_STRIDE] - src[-1+(2-i)*FDEC_STRIDE] );
    }

    int a = 16 * ( src[-1+7*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
    int b = ( 17 * H + 16 ) >> 5;
    int c = ( 17 * V + 16 ) >> 5;
    int i00 = a -3*b -3*c + 16;   /* prediction at (0,0), pre-rounding */

    /* Broadcast the three scalars across all 8 lanes. */
    vec_s16_u i00_u, b_u, c_u;
    i00_u.s[0] = i00;
    b_u.s[0]   = b;
    c_u.s[0]   = c;

    vec_u16_t val5_v = vec_splat_u16(5);   /* shift amount for >>5 */
    vec_s16_t i00_v, b_v, c_v;
    i00_v = vec_splat(i00_u.v, 0);
    b_v   = vec_splat(b_u.v, 0);
    c_v   = vec_splat(c_u.v, 0);

    /* Per-lane column index 0..7; first row = i00 + x*b in one mladd. */
    vec_s16_t induc_v = (vec_s16_t) CV(0, 1, 2, 3, 4, 5, 6, 7);
    vec_s16_t add_i0_b_0v = vec_mladd(induc_v, b_v, i00_v);

    PREP_STORE8;
    for( int i = 0; i < 8; ++i )
    {
        vec_s16_t shift_0_v = vec_sra(add_i0_b_0v, val5_v);        /* >>5 */
        vec_u8_t com_sat_v = vec_packsu(shift_0_v, shift_0_v);     /* clip to [0,255] */
        VEC_STORE8(com_sat_v, &src[0]);
        src += FDEC_STRIDE;
        /* Next row differs from this one by +c in every lane. */
        add_i0_b_0v = vec_adds(add_i0_b_0v, c_v);
    }
}