/* Computes four 4x4 DCTs covering the 8x8 residual pix1 - pix2.
 * dct:    output coefficients, indexed [sub-block][row][col] — TODO confirm layout
 * pix1:   first pixel block, row stride i_pix1
 * pix2:   second pixel block, row stride i_pix2
 * NOTE(review): another x264_sub8x8_dct_altivec with a different parameter list
 * appears later in this file; these look like alternate revisions of the same
 * function — confirm only one of them is actually compiled. */
void x264_sub8x8_dct_altivec( int16_t dct[4][4][4], uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    PREP_DIFF;       /* presumably declares temporaries for VEC_DIFF_H — see macro definition */
    PREP_STORE8_HL;  /* presumably declares temporaries for VEC_STORE8_H/L — see macro definition */
    vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v, tmp4v, tmp5v, tmp6v, tmp7v;

    /* Load one vector of 8 residual samples per row; the diff macros presumably
     * advance pix1/pix2 by their strides as a side effect — TODO confirm. */
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct0v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct1v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct2v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct3v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct4v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct5v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct6v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, dct7v );

    /* First 1-D DCT pass, transpose, second pass, transpose back. */
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
    VEC_TRANSPOSE_8( tmp0v, tmp1v, tmp2v, tmp3v,
                     tmp4v, tmp5v, tmp6v, tmp7v,
                     dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
    VEC_TRANSPOSE_8( tmp0v, tmp1v, tmp2v, tmp3v,
                     tmp4v, tmp5v, tmp6v, tmp7v,
                     dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    /* Scatter results: the high 8 bytes of each vector go to one 4x4 sub-block,
     * the low 8 bytes to another — presumably splitting the 8-wide rows between
     * horizontally adjacent sub-blocks; confirm against VEC_STORE8_H/L. */
    VEC_STORE8_H( dct0v, dct[0][0] );
    VEC_STORE8_L( dct0v, dct[1][0] );
    VEC_STORE8_H( dct1v, dct[0][1] );
    VEC_STORE8_L( dct1v, dct[1][1] );
    VEC_STORE8_H( dct2v, dct[0][2] );
    VEC_STORE8_L( dct2v, dct[1][2] );
    VEC_STORE8_H( dct3v, dct[0][3] );
    VEC_STORE8_L( dct3v, dct[1][3] );
    VEC_STORE8_H( dct4v, dct[2][0] );
    VEC_STORE8_L( dct4v, dct[3][0] );
    VEC_STORE8_H( dct5v, dct[2][1] );
    VEC_STORE8_L( dct5v, dct[3][1] );
    VEC_STORE8_H( dct6v, dct[2][2] );
    VEC_STORE8_L( dct6v, dct[3][2] );
    VEC_STORE8_H( dct7v, dct[2][3] );
    VEC_STORE8_L( dct7v, dct[3][3] );
}
/* Adds the 8x8 inverse DCT of dct[] to the pixel block at dst (rows are
 * FDEC_STRIDE bytes apart), clipping to the 0..255 pixel range.
 * Side effect: modifies the caller's dct[0] (rounding bias added in place).
 * dct must be 16-byte aligned (loaded with vec_ld). */
void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[64] )
{
    /* Small shift/rounding constants; presumably consumed inside
     * IDCT8_1D_ALTIVEC / ALTIVEC_STORE_SUM_CLIP — verify macro definitions. */
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_splat_u16(2);

    /* Must happen before the vec_ld of offset 0x00 below. */
    dct[0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
    s0 = vec_ld(0x00, dct);
    s1 = vec_ld(0x10, dct);
    s2 = vec_ld(0x20, dct);
    s3 = vec_ld(0x30, dct);
    s4 = vec_ld(0x40, dct);
    s5 = vec_ld(0x50, dct);
    s6 = vec_ld(0x60, dct);
    s7 = vec_ld(0x70, dct);

    /* First 1-D IDCT pass, transpose, second pass. */
    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    vec_s16_t tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7;
    VEC_TRANSPOSE_8( d0, d1, d2, d3, d4, d5, d6, d7,
                     tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7);

    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
    IDCT8_1D_ALTIVEC(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    /* Unaligned-store machinery: lvsl/lvsr permutes plus a byte-select mask,
     * since dst is not guaranteed 16-byte aligned. */
    vec_u8_t perm_ldv = vec_lvsl(0, dst);
    vec_u8_t perm_stv = vec_lvsr(8, dst);
    vec_u16_t sixv = vec_splat_u16(6);  /* presumably the final >>6 shift count — see store macro */
    const vec_u8_t sel = (vec_u8_t) CV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1);
    LOAD_ZERO;

    /* Add one row of IDCT output to one row of dst, clipped to 8 bits. */
    ALTIVEC_STORE_SUM_CLIP(&dst[0*FDEC_STRIDE], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*FDEC_STRIDE], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*FDEC_STRIDE], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*FDEC_STRIDE], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*FDEC_STRIDE], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*FDEC_STRIDE], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*FDEC_STRIDE], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*FDEC_STRIDE], idct7, perm_ldv, perm_stv, sel);
}
/* Computes the 8x8 DCT of the residual pix1 - pix2.
 * dct:  output, 64 coefficients, must be 16-byte aligned (stored with vec_st).
 * pix1: source block,    row stride FENC_STRIDE.
 * pix2: predicted block, row stride FDEC_STRIDE.
 * Both pixel pointers are assumed 8-byte aligned (VEC_DIFF_H_8BYTE_ALIGNED). */
void x264_sub8x8_dct8_altivec( int16_t dct[64], uint8_t *pix1, uint8_t *pix2 )
{
    /* Small shift/rounding constants; presumably consumed inside
     * DCT8_1D_ALTIVEC — verify against the macro definition.
     * Splat 2 directly for consistency with x264_add8x8_idct8_altivec;
     * the original vec_add( onev, onev ) yields the identical vector. */
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_splat_u16(2);

    PREP_DIFF_8BYTEALIGNED;  /* presumably declares temporaries for the diff macros */

    vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;

    /* One vector of 8 residual samples per row; the diff macros presumably
     * advance pix1/pix2 by their strides as a side effect — TODO confirm. */
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );

    /* First 1-D pass. */
    DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    vec_s16_t dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
              dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v;
    VEC_TRANSPOSE_8( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v,
                     dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    /* Second 1-D pass, on the transposed data. */
    DCT8_1D_ALTIVEC( dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    /* Store the 8 result vectors contiguously (vec_st needs 16-byte alignment). */
    vec_st( dct_tr0v,   0, dct );
    vec_st( dct_tr1v,  16, dct );
    vec_st( dct_tr2v,  32, dct );
    vec_st( dct_tr3v,  48, dct );
    vec_st( dct_tr4v,  64, dct );
    vec_st( dct_tr5v,  80, dct );
    vec_st( dct_tr6v,  96, dct );
    vec_st( dct_tr7v, 112, dct );
}
/* Computes four 4x4 DCTs covering the 8x8 residual pix1 - pix2.
 * dct:  output coefficients, written as 128 contiguous bytes via vec_st
 *       (must be 16-byte aligned); layout presumably [sub-block][row][col].
 * pix1: source block,    row stride FENC_STRIDE.
 * pix2: predicted block, row stride FDEC_STRIDE.
 * Both pixel pointers are assumed 8-byte aligned. */
void x264_sub8x8_dct_altivec( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;

    vec_s16_t diff0v, diff1v, diff2v, diff3v, diff4v, diff5v, diff6v, diff7v;
    vec_s16_t coef0v, coef1v, coef2v, coef3v, coef4v, coef5v, coef6v, coef7v;
    vec_u8_t permHighv, permLowv;

    /* Byte-select patterns: the two high 8-byte halves of a vector pair,
     * and the two low 8-byte halves, respectively. */
    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                              0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    permLowv  = (vec_u8_t) CV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
                              0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

    /* One vector of 8 residual samples per row. */
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, diff0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, diff1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, diff2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, diff3v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, diff4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, diff5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, diff6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, diff7v );

    /* First DCT pass, transpose, second pass. */
    VEC_DCT( diff0v, diff1v, diff2v, diff3v, coef0v, coef1v, coef2v, coef3v );
    VEC_DCT( diff4v, diff5v, diff6v, diff7v, coef4v, coef5v, coef6v, coef7v );
    VEC_TRANSPOSE_8( coef0v, coef1v, coef2v, coef3v,
                     coef4v, coef5v, coef6v, coef7v,
                     diff0v, diff1v, diff2v, diff3v,
                     diff4v, diff5v, diff6v, diff7v );
    VEC_DCT( diff0v, diff1v, diff2v, diff3v, coef0v, coef1v, coef2v, coef3v );
    VEC_DCT( diff4v, diff5v, diff6v, diff7v, coef4v, coef5v, coef6v, coef7v );

    /* Merge vector halves and write all 128 bytes of coefficients. */
    vec_st( vec_perm( coef0v, coef1v, permHighv ),   0, (int16_t*)dct );
    vec_st( vec_perm( coef2v, coef3v, permHighv ),  16, (int16_t*)dct );
    vec_st( vec_perm( coef4v, coef5v, permHighv ),  32, (int16_t*)dct );
    vec_st( vec_perm( coef6v, coef7v, permHighv ),  48, (int16_t*)dct );
    vec_st( vec_perm( coef0v, coef1v, permLowv  ),  64, (int16_t*)dct );
    vec_st( vec_perm( coef2v, coef3v, permLowv  ),  80, (int16_t*)dct );
    vec_st( vec_perm( coef4v, coef5v, permLowv  ),  96, (int16_t*)dct );
    vec_st( vec_perm( coef6v, coef7v, permLowv  ), 112, (int16_t*)dct );
}
/* Computes sixteen 4x4 DCTs covering the 16x16 residual pix1 - pix2.
 * dct:    output coefficients, indexed [sub-block][row][col] — TODO confirm layout
 * pix1:   first pixel block, row stride i_pix1
 * pix2:   second pixel block, row stride i_pix2
 * Processed as four 8x8 quadrants: top-left (dcth*, rows 0-7), top-right
 * (dctl*, rows 0-7), bottom-left, bottom-right. */
void x264_sub16x16_dct_altivec( int16_t dct[16][4][4], uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    PREP_DIFF;       /* presumably declares temporaries for VEC_DIFF_HL — see macro definition */
    PREP_STORE8_HL;  /* presumably declares temporaries for VEC_STORE8_H/L — see macro definition */
    vec_s16_t dcth0v, dcth1v, dcth2v, dcth3v,
              dcth4v, dcth5v, dcth6v, dcth7v,
              dctl0v, dctl1v, dctl2v, dctl3v,
              dctl4v, dctl5v, dctl6v, dctl7v;
    vec_s16_t temp0v, temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v;

    /* Rows 0-7: each VEC_DIFF_HL presumably yields the left (dcth*) and right
     * (dctl*) 8-sample halves of one 16-wide residual row, advancing
     * pix1/pix2 by their strides — TODO confirm against macro definition. */
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth0v, dctl0v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth1v, dctl1v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth2v, dctl2v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth3v, dctl3v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth4v, dctl4v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth5v, dctl5v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth6v, dctl6v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth7v, dctl7v );

    /* Top-left 8x8: DCT pass, transpose, DCT pass, transpose back. */
    VEC_DCT( dcth0v, dcth1v, dcth2v, dcth3v, temp0v, temp1v, temp2v, temp3v );
    VEC_DCT( dcth4v, dcth5v, dcth6v, dcth7v, temp4v, temp5v, temp6v, temp7v );
    VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
                     temp4v, temp5v, temp6v, temp7v,
                     dcth0v, dcth1v, dcth2v, dcth3v,
                     dcth4v, dcth5v, dcth6v, dcth7v );
    VEC_DCT( dcth0v, dcth1v, dcth2v, dcth3v, temp0v, temp1v, temp2v, temp3v );
    VEC_DCT( dcth4v, dcth5v, dcth6v, dcth7v, temp4v, temp5v, temp6v, temp7v );
    VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
                     temp4v, temp5v, temp6v, temp7v,
                     dcth0v, dcth1v, dcth2v, dcth3v,
                     dcth4v, dcth5v, dcth6v, dcth7v );
    /* Scatter into sub-blocks 0-3 (high 8 bytes to one sub-block, low 8 to
     * its neighbor — confirm against VEC_STORE8_H/L). */
    VEC_STORE8_H( dcth0v, dct[0][0] );
    VEC_STORE8_L( dcth0v, dct[1][0] );
    VEC_STORE8_H( dcth1v, dct[0][1] );
    VEC_STORE8_L( dcth1v, dct[1][1] );
    VEC_STORE8_H( dcth2v, dct[0][2] );
    VEC_STORE8_L( dcth2v, dct[1][2] );
    VEC_STORE8_H( dcth3v, dct[0][3] );
    VEC_STORE8_L( dcth3v, dct[1][3] );
    VEC_STORE8_H( dcth4v, dct[2][0] );
    VEC_STORE8_L( dcth4v, dct[3][0] );
    VEC_STORE8_H( dcth5v, dct[2][1] );
    VEC_STORE8_L( dcth5v, dct[3][1] );
    VEC_STORE8_H( dcth6v, dct[2][2] );
    VEC_STORE8_L( dcth6v, dct[3][2] );
    VEC_STORE8_H( dcth7v, dct[2][3] );
    VEC_STORE8_L( dcth7v, dct[3][3] );

    /* Top-right 8x8: same pipeline on dctl*, into sub-blocks 4-7. */
    VEC_DCT( dctl0v, dctl1v, dctl2v, dctl3v, temp0v, temp1v, temp2v, temp3v );
    VEC_DCT( dctl4v, dctl5v, dctl6v, dctl7v, temp4v, temp5v, temp6v, temp7v );
    VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
                     temp4v, temp5v, temp6v, temp7v,
                     dctl0v, dctl1v, dctl2v, dctl3v,
                     dctl4v, dctl5v, dctl6v, dctl7v );
    VEC_DCT( dctl0v, dctl1v, dctl2v, dctl3v, temp0v, temp1v, temp2v, temp3v );
    VEC_DCT( dctl4v, dctl5v, dctl6v, dctl7v, temp4v, temp5v, temp6v, temp7v );
    VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
                     temp4v, temp5v, temp6v, temp7v,
                     dctl0v, dctl1v, dctl2v, dctl3v,
                     dctl4v, dctl5v, dctl6v, dctl7v );
    VEC_STORE8_H( dctl0v, dct[4][0] );
    VEC_STORE8_L( dctl0v, dct[5][0] );
    VEC_STORE8_H( dctl1v, dct[4][1] );
    VEC_STORE8_L( dctl1v, dct[5][1] );
    VEC_STORE8_H( dctl2v, dct[4][2] );
    VEC_STORE8_L( dctl2v, dct[5][2] );
    VEC_STORE8_H( dctl3v, dct[4][3] );
    VEC_STORE8_L( dctl3v, dct[5][3] );
    VEC_STORE8_H( dctl4v, dct[6][0] );
    VEC_STORE8_L( dctl4v, dct[7][0] );
    VEC_STORE8_H( dctl5v, dct[6][1] );
    VEC_STORE8_L( dctl5v, dct[7][1] );
    VEC_STORE8_H( dctl6v, dct[6][2] );
    VEC_STORE8_L( dctl6v, dct[7][2] );
    VEC_STORE8_H( dctl7v, dct[6][3] );
    VEC_STORE8_L( dctl7v, dct[7][3] );

    /* Rows 8-15: pix1/pix2 have been advanced past the top half by the
     * earlier diff macros — TODO confirm. */
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth0v, dctl0v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth1v, dctl1v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth2v, dctl2v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth3v, dctl3v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth4v, dctl4v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth5v, dctl5v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth6v, dctl6v );
    VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, dcth7v, dctl7v );

    /* Bottom-left 8x8, into sub-blocks 8-11. */
    VEC_DCT( dcth0v, dcth1v, dcth2v, dcth3v, temp0v, temp1v, temp2v, temp3v );
    VEC_DCT( dcth4v, dcth5v, dcth6v, dcth7v, temp4v, temp5v, temp6v, temp7v );
    VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
                     temp4v, temp5v, temp6v, temp7v,
                     dcth0v, dcth1v, dcth2v, dcth3v,
                     dcth4v, dcth5v, dcth6v, dcth7v );
    VEC_DCT( dcth0v, dcth1v, dcth2v, dcth3v, temp0v, temp1v, temp2v, temp3v );
    VEC_DCT( dcth4v, dcth5v, dcth6v, dcth7v, temp4v, temp5v, temp6v, temp7v );
    VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
                     temp4v, temp5v, temp6v, temp7v,
                     dcth0v, dcth1v, dcth2v, dcth3v,
                     dcth4v, dcth5v, dcth6v, dcth7v );
    VEC_STORE8_H( dcth0v, dct[8][0] );
    VEC_STORE8_L( dcth0v, dct[9][0] );
    VEC_STORE8_H( dcth1v, dct[8][1] );
    VEC_STORE8_L( dcth1v, dct[9][1] );
    VEC_STORE8_H( dcth2v, dct[8][2] );
    VEC_STORE8_L( dcth2v, dct[9][2] );
    VEC_STORE8_H( dcth3v, dct[8][3] );
    VEC_STORE8_L( dcth3v, dct[9][3] );
    VEC_STORE8_H( dcth4v, dct[10][0] );
    VEC_STORE8_L( dcth4v, dct[11][0] );
    VEC_STORE8_H( dcth5v, dct[10][1] );
    VEC_STORE8_L( dcth5v, dct[11][1] );
    VEC_STORE8_H( dcth6v, dct[10][2] );
    VEC_STORE8_L( dcth6v, dct[11][2] );
    VEC_STORE8_H( dcth7v, dct[10][3] );
    VEC_STORE8_L( dcth7v, dct[11][3] );

    /* Bottom-right 8x8, into sub-blocks 12-15. */
    VEC_DCT( dctl0v, dctl1v, dctl2v, dctl3v, temp0v, temp1v, temp2v, temp3v );
    VEC_DCT( dctl4v, dctl5v, dctl6v, dctl7v, temp4v, temp5v, temp6v, temp7v );
    VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
                     temp4v, temp5v, temp6v, temp7v,
                     dctl0v, dctl1v, dctl2v, dctl3v,
                     dctl4v, dctl5v, dctl6v, dctl7v );
    VEC_DCT( dctl0v, dctl1v, dctl2v, dctl3v, temp0v, temp1v, temp2v, temp3v );
    VEC_DCT( dctl4v, dctl5v, dctl6v, dctl7v, temp4v, temp5v, temp6v, temp7v );
    VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
                     temp4v, temp5v, temp6v, temp7v,
                     dctl0v, dctl1v, dctl2v, dctl3v,
                     dctl4v, dctl5v, dctl6v, dctl7v );
    VEC_STORE8_H( dctl0v, dct[12][0] );
    VEC_STORE8_L( dctl0v, dct[13][0] );
    VEC_STORE8_H( dctl1v, dct[12][1] );
    VEC_STORE8_L( dctl1v, dct[13][1] );
    VEC_STORE8_H( dctl2v, dct[12][2] );
    VEC_STORE8_L( dctl2v, dct[13][2] );
    VEC_STORE8_H( dctl3v, dct[12][3] );
    VEC_STORE8_L( dctl3v, dct[13][3] );
    VEC_STORE8_H( dctl4v, dct[14][0] );
    VEC_STORE8_L( dctl4v, dct[15][0] );
    VEC_STORE8_H( dctl5v, dct[14][1] );
    VEC_STORE8_L( dctl5v, dct[15][1] );
    VEC_STORE8_H( dctl6v, dct[14][2] );
    VEC_STORE8_L( dctl6v, dct[15][2] );
    VEC_STORE8_H( dctl7v, dct[14][3] );
    VEC_STORE8_L( dctl7v, dct[15][3] );
}