void x264_add4x4_idct_altivec( uint8_t *dst, int16_t dct[16] )
{
    vec_u16_t onev = vec_splat_u16(1);

    dct[0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3;

    s0 = vec_ld( 0x00, dct );
    s1 = vec_sld( s0, s0, 8 );
    s2 = vec_ld( 0x10, dct );
    s3 = vec_sld( s2, s2, 8 );

    vec_s16_t d0, d1, d2, d3;
    IDCT_1D_ALTIVEC( s0, s1, s2, s3, d0, d1, d2, d3 );

    vec_s16_t tr0, tr1, tr2, tr3;
    VEC_TRANSPOSE_4( d0, d1, d2, d3, tr0, tr1, tr2, tr3 );

    vec_s16_t idct0, idct1, idct2, idct3;
    IDCT_1D_ALTIVEC( tr0, tr1, tr2, tr3, idct0, idct1, idct2, idct3 );

    vec_u8_t perm_ldv = vec_lvsl( 0, dst );
    vec_u16_t sixv = vec_splat_u16(6);
    LOAD_ZERO;

    ALTIVEC_STORE4_SUM_CLIP( &dst[0*FDEC_STRIDE], idct0, perm_ldv );
    ALTIVEC_STORE4_SUM_CLIP( &dst[1*FDEC_STRIDE], idct1, perm_ldv );
    ALTIVEC_STORE4_SUM_CLIP( &dst[2*FDEC_STRIDE], idct2, perm_ldv );
    ALTIVEC_STORE4_SUM_CLIP( &dst[3*FDEC_STRIDE], idct3, perm_ldv );
}
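/*
 * IDCT_1D_ALTIVEC's definition is not shown here. As a point of reference,
 * the following scalar routine is a sketch (an assumption about the macro,
 * not its actual body) of the standard H.264 1-D inverse-transform butterfly
 * that each vectorised pass applies to a row or column of four coefficients;
 * idct_1d_ref is a hypothetical helper name.
 */
static inline void idct_1d_ref( int16_t d[4], const int16_t s[4] )
{
    int e0 = s[0] + s[2];          /* even part */
    int e1 = s[0] - s[2];
    int o0 = (s[1] >> 1) - s[3];   /* odd part, with the spec's >>1 */
    int o1 = s[1] + (s[3] >> 1);

    d[0] = e0 + o1;                /* recombine */
    d[1] = e1 + o0;
    d[2] = e1 - o0;
    d[3] = e0 - o1;
}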
void x264_sub4x4_dct_altivec( int16_t dct[4][4],
                              uint8_t *pix1, int i_pix1,
                              uint8_t *pix2, int i_pix2 )
{
    PREP_DIFF;
    PREP_STORE8;

    vec_s16_t dct0v, dct1v, dct2v, dct3v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v;

    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct0v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct1v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct2v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, dct3v );

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v, dct0v, dct1v, dct2v, dct3v );
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v, dct0v, dct1v, dct2v, dct3v );

    VEC_STORE8( dct0v, dct[0] );
    VEC_STORE8( dct1v, dct[1] );
    VEC_STORE8( dct2v, dct[2] );
    VEC_STORE8( dct3v, dct[3] );
}
void x264_sub4x4_dct_altivec( int16_t dct[16], uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v;
    vec_u8_t permHighv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct3v );

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v, dct0v, dct1v, dct2v, dct3v );
    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                              0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );

    vec_st( vec_perm( tmp0v, tmp1v, permHighv ), 0,  dct );
    vec_st( vec_perm( tmp2v, tmp3v, permHighv ), 16, dct );
}
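/*
 * VEC_DCT's definition is likewise not shown. As a reference for what both
 * sub4x4_dct variants above compute per pass, this scalar sketch (an
 * assumption, using the hypothetical helper name dct_1d_ref) performs the
 * H.264 1-D forward transform on four residual samples; it is applied once
 * per row, then once per column after the transpose.
 */
static inline void dct_1d_ref( int16_t d[4], const int16_t s[4] )
{
    int s03 = s[0] + s[3];
    int s12 = s[1] + s[2];
    int d03 = s[0] - s[3];
    int d12 = s[1] - s[2];

    d[0] =   s03 +   s12;
    d[1] = 2*d03 +   d12;
    d[2] =   s03 -   s12;
    d[3] =   d03 - 2*d12;
}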
static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32; /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0, block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16, block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0, v6us);
    va1 = vec_sra(va1, v6us);
    va2 = vec_sra(va2, v6us);
    va3 = vec_sra(va3, v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}
static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;              // s16
    vec_s16 vz0, vz1, vz2, vz3;              // s16
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;      // s16
    vec_u8 va_u8;                            // u8
    vec_u32 va_u32;                          // u32
    vec_s16 vdst_ss;                         // s16
    const vec_u16 v6us = __vsplth(6);        // u16
    vec_u8 vdst, vdst_orig;                  // u8
    vec_u8 vdst_mask = __lvsl(dst, 0);       // u8
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32; /* add 32 as a DC-level for rounding */

    vtmp0 = __lvx(block, 0);
    vtmp1 = __vsldoi(vtmp0, vtmp0, 8);
    vtmp2 = __lvx(block, 16);
    vtmp3 = __vsldoi(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = __vsrah(va0, v6us);
    va1 = __vsrah(va1, v6us);
    va2 = __vsrah(va2, v6us);
    va3 = __vsrah(va3, v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}
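/*
 * VEC_LOAD_U8_ADD_S16_STORE_U8 is defined elsewhere in the surrounding code.
 * Conceptually, each invocation adds one row of the inverse-transformed
 * residual to four destination pixels with unsigned 8-bit saturation; the
 * scalar sketch below (assumed behaviour, hypothetical name
 * add_residual_row_ref) shows the intended effect.
 */
static inline void add_residual_row_ref( uint8_t *dst, const int16_t res[4] )
{
    for( int i = 0; i < 4; i++ )
    {
        int v = dst[i] + res[i];
        dst[i] = v < 0 ? 0 : v > 255 ? 255 : v;   /* clip to [0,255] */
    }
}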