uint32_t
sse8_16bit_altivec_c(const int16_t * b1,
                     const int16_t * b2,
                     const uint32_t stride)
{
    register vector signed short b1_vec;
    register vector signed short b2_vec;
    register vector signed short diff;
    register vector signed int sum;
    uint32_t result;

    /* initialize */
    sum = vec_splat_s32(0);

    SSE8_16BIT();
    SSE8_16BIT();
    SSE8_16BIT();
    SSE8_16BIT();
    SSE8_16BIT();
    SSE8_16BIT();
    SSE8_16BIT();
    SSE8_16BIT();

    /* sum the vector */
    sum = vec_sums(sum, vec_splat_s32(0));
    sum = vec_splat(sum, 3);

    vec_ste(sum, 0, (int*)&result);

    /* and return */
    return result;
}
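The row macro SSE8_16BIT() is defined elsewhere in this file; each of the eight invocations accumulates one row of squared differences into `sum`. A minimal scalar sketch of the same computation (the name `sse8_16bit_ref` is hypothetical, and `stride` is assumed to count int16_t elements; the actual unit is fixed by the macro, which is not shown here):

static uint32_t sse8_16bit_ref(const int16_t *b1, const int16_t *b2,
                               uint32_t stride)
{
    uint32_t sum = 0;
    for (int row = 0; row < 8; row++) {
        for (int col = 0; col < 8; col++) {
            int d = b1[col] - b2[col];
            sum += (uint32_t)(d * d);   /* squared error per coefficient */
        }
        b1 += stride;                   /* assumption: stride in elements */
        b2 += stride;
    }
    return sum;
}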
static int32_t scalarproduct_int16_altivec(const int16_t * v1, const int16_t * v2,
                                           int order, const int shift)
{
    int i;
    LOAD_ZERO;
    register vec_s16 vec1, *pv;
    register vec_s32 res = vec_splat_s32(0), t;
    register vec_u32 shifts;
    int32_t ires;

    /* vec_splat_u32() only encodes a 5-bit immediate, so build larger
     * shift counts by summing splat-able pieces */
    shifts = zero_u32v;
    if (shift & 0x10) shifts = vec_add(shifts, vec_sl(vec_splat_u32(0x08), vec_splat_u32(0x1)));
    if (shift & 0x08) shifts = vec_add(shifts, vec_splat_u32(0x08));
    if (shift & 0x04) shifts = vec_add(shifts, vec_splat_u32(0x04));
    if (shift & 0x02) shifts = vec_add(shifts, vec_splat_u32(0x02));
    if (shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01));

    for (i = 0; i < order; i += 8) {
        pv = (vec_s16*)v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        t = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        t = vec_sr(t, shifts);
        res = vec_sums(t, res);
        v1 += 8;
        v2 += 8;
    }
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);

    return ires;
}
uint32_t
sad8_altivec_c(const uint8_t * cur,
               const uint8_t *ref,
               const uint32_t stride)
{
    uint32_t result = 0;

    register vector unsigned int sad;
    register vector unsigned char c;
    register vector unsigned char r;

    /* initialize */
    sad = vec_splat_u32(0);

    /* Perform sad operations */
    SAD8();
    SAD8();
    SAD8();
    SAD8();
    SAD8();
    SAD8();
    SAD8();
    SAD8();

    /* finish addition, add the first 2 together */
    sad = vec_and(sad, (vector unsigned int)vec_pack(vec_splat_u16(-1), vec_splat_u16(0)));
    sad = (vector unsigned int)vec_sums((vector signed int)sad, vec_splat_s32(0));
    sad = vec_splat(sad, 3);
    vec_ste(sad, 0, &result);

    return result;
}
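The vec_and with vec_pack(vec_splat_u16(-1), vec_splat_u16(0)) masks the accumulator down to its first two 32-bit lanes before the final vec_sums reduction. A scalar sketch of the 8x8 sum of absolute differences this computes (SAD8() itself is defined elsewhere; `sad8_ref` is a hypothetical name, and a byte-unit stride is assumed):

static uint32_t sad8_ref(const uint8_t *cur, const uint8_t *ref,
                         uint32_t stride)
{
    uint32_t sad = 0;
    for (int row = 0; row < 8; row++) {
        for (int col = 0; col < 8; col++) {
            int d = cur[col] - ref[col];
            sad += (uint32_t)(d < 0 ? -d : d);  /* absolute difference */
        }
        cur += stride;
        ref += stride;
    }
    return sad;
}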
void x264_sub8x8_dct_dc_altivec( int16_t dct[4], uint8_t *pix1, uint8_t *pix2 )
{
    vec_s16_t diff[2];
    vec_s32_t sum[2];
    vec_s32_t zero32 = vec_splat_s32(0);
    vec_u8_t mask = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                      0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F };

    pix_diff( &pix1[0], &pix2[0], diff, 0 );
    pix_diff( &pix1[4*FENC_STRIDE], &pix2[4*FDEC_STRIDE], diff, 1 );

    sum[0] = vec_sum4s( diff[0], zero32 );
    sum[1] = vec_sum4s( diff[1], zero32 );
    diff[0] = vec_packs( sum[0], sum[1] );
    sum[0] = vec_sum4s( diff[0], zero32 );
    diff[0] = vec_packs( sum[0], zero32 );

    diff[1] = vec_vsx_ld( 0, dct );
    diff[0] = vec_perm( diff[0], diff[1], mask );
    vec_vsx_st( diff[0], 0, dct );

    /* 2x2 DC transform */
    int d0 = dct[0] + dct[1];
    int d1 = dct[2] + dct[3];
    int d2 = dct[0] - dct[1];
    int d3 = dct[2] - dct[3];
    dct[0] = d0 + d1;
    dct[1] = d0 - d1;
    dct[2] = d2 + d3;
    dct[3] = d2 - d3;
}
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size)
{
    int i, size16;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u;

    u.vscore = vec_splat_s32(0);

//XXX lazy way, fix it later
#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b));

    size16 = size >> 4;
    while (size16) {
        // score += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]);
        // load pix1 and the first batch of pix2
        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;
        // unpack
        vpix1h = vec_unpackh(vpix1);
        vdiff  = vec_sub(vpix1h, vpix2);
        vpix1l = vec_unpackl(vpix1);
        // load another batch from pix2
        vpix2 = vec_unaligned_load(pix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1 += 16;
        pix2 += 8;
        size16--;
    }
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    size %= 16;
    for (i = 0; i < size; i++) {
        u.score[3] += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]);
    }
    return u.score[3];
}
uint32_t
sad16bi_altivec_c(vector unsigned char *cur,
                  vector unsigned char *ref1,
                  vector unsigned char *ref2,
                  uint32_t stride)
{
    vector unsigned char t1, t2;
    vector unsigned char mask1, mask2;
    vector unsigned char sad;
    vector unsigned int sum;
    uint32_t result;

#ifdef DEBUG
    /* print alignment errors if this is on */
    if((long)cur & 0xf)
        fprintf(stderr, "sad16bi_altivec:incorrect align, cur: %lx\n", (long)cur);
    if(stride & 0xf)
        fprintf(stderr, "sad16bi_altivec:incorrect align, stride: %lu\n", stride);
#endif

    /* Initialisation stuff */
    stride >>= 4;
    mask1 = vec_lvsl(0, (unsigned char*)ref1);
    mask2 = vec_lvsl(0, (unsigned char*)ref2);
    sad = vec_splat_u8(0);
    sum = (vector unsigned int)sad;

    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();
    SAD16BI();

    sum = (vector unsigned int)vec_sums((vector signed int)sum, vec_splat_s32(0));
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, (uint32_t*)&result);

    return result;
}
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size)
{
    int i, size16 = size >> 4;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u = { .vscore = vec_splat_s32(0) };

    while (size16) {
        // score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
        // load pix1 and the first batch of pix2
        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;
        // unpack
        vpix1h = vec_unpackh(vpix1);
        vdiff  = vec_sub(vpix1h, vpix2);
        vpix1l = vec_unpackl(vpix1);
        // load another batch from pix2
        vpix2 = vec_unaligned_load(pix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1 += 16;
        pix2 += 8;
        size16--;
    }
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    size %= 16;
    for (i = 0; i < size; i++)
        u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);

    return u.score[3];
}
#endif /* HAVE_ALTIVEC */

av_cold void ff_svq1enc_init_ppc(SVQ1EncContext *c)
{
#if HAVE_ALTIVEC
    c->ssd_int8_vs_int16 = ssd_int8_vs_int16_altivec;
#endif /* HAVE_ALTIVEC */
}
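For reference, the vector loop and its scalar tail both compute a plain sum of squared int8-vs-int16 differences; a sketch of the scalar counterpart (the `_ref` name is hypothetical):

static int ssd_int8_vs_int16_ref(const int8_t *pix1, const int16_t *pix2,
                                 int size)
{
    int score = 0;
    for (int i = 0; i < size; i++)
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    return score;
}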
static av_always_inline
void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int mx, int w, int is6tap)
{
    LOAD_H_SUBPEL_FILTER(mx-1);
    vec_u8 align_vec0, align_vec8, permh0, permh8, filt;
    vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
    vec_u8 a, b, pixh, pixl, outer;
    vec_s16 f16h, f16l;
    vec_s32 filth, filtl;

    vec_u8 perm_inner6 = { 1,2,3,4, 2,3,4,5, 3,4,5,6, 4,5,6,7 };
    vec_u8 perm_inner4 = { 0,1,2,3, 1,2,3,4, 2,3,4,5, 3,4,5,6 };
    vec_u8 perm_inner  = is6tap ? perm_inner6 : perm_inner4;
    vec_u8 perm_outer  = { 4,9, 0,5, 5,10, 1,6, 6,11, 2,7, 7,12, 3,8 };
    vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
    vec_u16 c7  = vec_splat_u16(7);

    align_vec0 = vec_lvsl( -is6tap-1, src);
    align_vec8 = vec_lvsl(8-is6tap-1, src);

    permh0     = vec_perm(align_vec0, align_vec0, perm_inner);
    permh8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_inner = vec_add(perm_inner, vec_splat_u8(4));
    perml0     = vec_perm(align_vec0, align_vec0, perm_inner);
    perml8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
    perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);

    while (h --> 0) {
        FILTER_H(f16h, 0);

        if (w == 16) {
            FILTER_H(f16l, 8);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
            if (w == 8)
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
        }
        src += src_stride;
        dst += dst_stride;
    }
}
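A scalar sketch of what this core applies per pixel, under stated assumptions: the filter taps come from LOAD_H_SUBPEL_FILTER(mx-1) in the vector code but are passed explicitly here since that macro is not shown, the c64/c7 constants are read as the usual (sum + 64) >> 7 rounding, and both the function name and the tap-offset arithmetic are illustrative rather than the library's API:

static void put_vp8_epel_h_ref(uint8_t *dst, ptrdiff_t dst_stride,
                               const uint8_t *src, ptrdiff_t src_stride,
                               int h, int w, const int8_t taps[6], int is6tap)
{
    int ntaps  = is6tap ? 6 : 4;
    int offset = is6tap ? 2 : 1;   /* leftmost tap position (assumption) */
    while (h-- > 0) {
        for (int x = 0; x < w; x++) {
            int sum = 0;
            for (int t = 0; t < ntaps; t++)
                sum += taps[t] * src[x - offset + t];
            sum = (sum + 64) >> 7;                       /* c64 / c7 above */
            dst[x] = sum < 0 ? 0 : (sum > 255 ? 255 : sum); /* packsu-style clip */
        }
        src += src_stride;
        dst += dst_stride;
    }
}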
static int32_t scalarproduct_int16_altivec(const int16_t *v1, const int16_t *v2,
                                           int order)
{
    int i;
    LOAD_ZERO;
    register vec_s16 vec1;
    register vec_s32 res = vec_splat_s32(0), t;
    int32_t ires;

    for (i = 0; i < order; i += 8) {
        vec1 = vec_unaligned_load(v1);
        t = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        res = vec_sums(t, res);
        v1 += 8;
        v2 += 8;
    }
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);

    return ires;
}
static signed int
scalarproduct_int16_vsx (const signed short *v1, const signed short *v2,
                         int order)
{
    int i;
    LOAD_ZERO;
    register vec_s16 vec1;
    register vec_s32 res = vec_splat_s32 (0), t;
    signed int ires;

    for (i = 0; i < order; i += 8) {
        vec1 = vec_vsx_ld (0, v1);
        t = vec_msum (vec1, vec_vsx_ld (0, v2), zero_s32v);
        res = vec_sums (t, res);
        v1 += 8;
        v2 += 8;
    }
    res = vec_splat (res, 3);
    vec_ste (res, 0, &ires);

    return ires;
}
static int32_t scalarproduct_int16_altivec(const int16_t *v1, const int16_t *v2,
                                           int order)
{
    int i;
    LOAD_ZERO;
    const vec_s16 *pv;
    register vec_s16 vec1;
    register vec_s32 res = vec_splat_s32(0), t;
    int32_t ires;

    for (i = 0; i < order; i += 8) {
        pv = (const vec_s16*)v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        t = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        res = vec_sums(t, res);
        v1 += 8;
        v2 += 8;
    }
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);

    return ires;
}
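The four scalarproduct variants above (with shift, unaligned-load, VSX, and explicit lvsl/perm) all reduce to the same scalar loop; a sketch with the shift parameter of the first variant folded in (pass shift = 0 for the unshifted ones; the `_ref` name is hypothetical). Note the shifted vector variant applies the shift to pairwise partial sums rather than to individual products, so the low bits can differ from this per-product form:

static int32_t scalarproduct_int16_ref(const int16_t *v1, const int16_t *v2,
                                       int order, int shift)
{
    int32_t res = 0;
    while (order--)
        res += (*v1++ * *v2++) >> shift;   /* running dot product */
    return res;
}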
/** Do inverse transform on 8x4 part of block */
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);

    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);

    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);

    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

#define ADD(dest,src,perm)                                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm);  \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);

    ADD (dest, src0, perm0)      dest += stride;
    ADD (dest, src1, perm1)      dest += stride;
    ADD (dest, src2, perm0)      dest += stride;
    ADD (dest, src3, perm1)
}
/* this code assumes that stride % 16 == 0 */
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                        int stride, int h, int x, int y)
{
    signed int ABCD[4] __attribute__((aligned(16))) =
        {((8 - x) * (8 - y)),
         ((    x) * (8 - y)),
         ((8 - x) * (    y)),
         ((    x) * (    y))};
    register int i;
    vector unsigned char fperm;
    const vector signed int vABCD = vec_ld(0, ABCD);
    const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
    const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
    const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
    const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
    const vector signed int vzero = vec_splat_s32(0);
    const vector signed short v28ss = vec_sub(vec_sl(vec_splat_s16(1), vec_splat_u16(5)),
                                              vec_splat_s16(4));
    const vector unsigned short v6us = vec_splat_u16(6);
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vector unsigned char vsrc0uc, vsrc1uc;
    vector signed short vsrc0ssH, vsrc1ssH;
    vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc;
    vector signed short vsrc2ssH, vsrc3ssH, psum;
    vector unsigned char vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17,
                                          0x08, 0x09, 0x0A, 0x0B,
                                          0x0C, 0x0D, 0x0E, 0x0F);
    } else {
        fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
                                          0x04, 0x05, 0x06, 0x07,
                                          0x18, 0x19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);
    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                               (vector unsigned char)vsrc0uc);
    vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                               (vector unsigned char)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride + 0, src);
            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

            vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                       (vector unsigned char)vsrc2uc);
            vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                       (vector unsigned char)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sra(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vector unsigned char)vec_packsu(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    } else {
        vector unsigned char vsrcDuc;
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride +  0, src);
            vsrcDuc = vec_ld(stride + 16, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
            if (reallyBadAlign)
                vsrc3uc = vsrcDuc;
            else
                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

            vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                       (vector unsigned char)vsrc2uc);
            vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                       (vector unsigned char)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sr(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vector unsigned char)vec_pack(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    }
}
/* this code assumes stride % 16 == 0 */
static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src,
                                                 int dstStride, int srcStride)
{
    POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
    POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);

    register int i;

    const vector signed int vzero = vec_splat_s32(0);
    const vector unsigned char perm = vec_lvsl(0, src);
    const vector signed short v20ss = (const vector signed short)AVV(20);
    const vector unsigned short v5us = vec_splat_u16(5);
    const vector signed short v5ss = vec_splat_s16(5);
    const vector signed short v16ss = (const vector signed short)AVV(16);
    const vector unsigned char dstperm = vec_lvsr(0, dst);
    const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
    const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);

    uint8_t *srcbis = src - (srcStride * 2);

    const vector unsigned char srcM2a = vec_ld(0, srcbis);
    const vector unsigned char srcM2b = vec_ld(16, srcbis);
    const vector unsigned char srcM2 = vec_perm(srcM2a, srcM2b, perm);
    srcbis += srcStride;
    const vector unsigned char srcM1a = vec_ld(0, srcbis);
    const vector unsigned char srcM1b = vec_ld(16, srcbis);
    const vector unsigned char srcM1 = vec_perm(srcM1a, srcM1b, perm);
    srcbis += srcStride;
    const vector unsigned char srcP0a = vec_ld(0, srcbis);
    const vector unsigned char srcP0b = vec_ld(16, srcbis);
    const vector unsigned char srcP0 = vec_perm(srcP0a, srcP0b, perm);
    srcbis += srcStride;
    const vector unsigned char srcP1a = vec_ld(0, srcbis);
    const vector unsigned char srcP1b = vec_ld(16, srcbis);
    const vector unsigned char srcP1 = vec_perm(srcP1a, srcP1b, perm);
    srcbis += srcStride;
    const vector unsigned char srcP2a = vec_ld(0, srcbis);
    const vector unsigned char srcP2b = vec_ld(16, srcbis);
    const vector unsigned char srcP2 = vec_perm(srcP2a, srcP2b, perm);
    srcbis += srcStride;

    vector signed short srcM2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
    vector signed short srcM2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);
    vector signed short srcM1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
    vector signed short srcM1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
    vector signed short srcP0ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
    vector signed short srcP0ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
    vector signed short srcP1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
    vector signed short srcP1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
    vector signed short srcP2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
    vector signed short srcP2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);

    for (i = 0 ; i < 16 ; i++) {
        const vector unsigned char srcP3a = vec_ld(0, srcbis);
        const vector unsigned char srcP3b = vec_ld(16, srcbis);
        const vector unsigned char srcP3 = vec_perm(srcP3a, srcP3b, perm);
        const vector signed short srcP3ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
        const vector signed short srcP3ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
        srcbis += srcStride;

        const vector signed short sum1A = vec_adds(srcP0ssA, srcP1ssA);
        const vector signed short sum1B = vec_adds(srcP0ssB, srcP1ssB);
        const vector signed short sum2A = vec_adds(srcM1ssA, srcP2ssA);
        const vector signed short sum2B = vec_adds(srcM1ssB, srcP2ssB);
        const vector signed short sum3A = vec_adds(srcM2ssA, srcP3ssA);
        const vector signed short sum3B = vec_adds(srcM2ssB, srcP3ssB);

        srcM2ssA = srcM1ssA;
        srcM2ssB = srcM1ssB;
        srcM1ssA = srcP0ssA;
        srcM1ssB = srcP0ssB;
        srcP0ssA = srcP1ssA;
        srcP0ssB = srcP1ssB;
        srcP1ssA = srcP2ssA;
        srcP1ssB = srcP2ssB;
        srcP2ssA = srcP3ssA;
        srcP2ssB = srcP3ssB;

        const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
        const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);
        const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
        const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
        const vector signed short pp3A = vec_add(sum3A, pp1A);
        const vector signed short pp3B = vec_add(sum3B, pp1B);
        const vector signed short psumA = vec_sub(pp3A, pp2A);
        const vector signed short psumB = vec_sub(pp3B, pp2B);
        const vector signed short sumA = vec_sra(psumA, v5us);
        const vector signed short sumB = vec_sra(psumB, v5us);

        const vector unsigned char sum = vec_packsu(sumA, sumB);
        const vector unsigned char dst1 = vec_ld(0, dst);
        const vector unsigned char dst2 = vec_ld(16, dst);
        const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));

        vector unsigned char fsum;
        OP_U8_ALTIVEC(fsum, sum, vdst);

        const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
        const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
        const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);

        vec_st(fdst1, 0, dst);
        vec_st(fdst2, 16, dst);

        dst += dstStride;
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
}
/* this code assumes stride % 16 == 0 *and* tmp is properly aligned */
static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src,
                                                  int dstStride, int tmpStride, int srcStride)
{
    POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
    POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
    register int i;

    const vector signed int vzero = vec_splat_s32(0);
    const vector unsigned char permM2 = vec_lvsl(-2, src);
    const vector unsigned char permM1 = vec_lvsl(-1, src);
    const vector unsigned char permP0 = vec_lvsl(+0, src);
    const vector unsigned char permP1 = vec_lvsl(+1, src);
    const vector unsigned char permP2 = vec_lvsl(+2, src);
    const vector unsigned char permP3 = vec_lvsl(+3, src);
    const vector signed short v20ss = (const vector signed short)AVV(20);
    const vector unsigned int v10ui = vec_splat_u32(10);
    const vector signed short v5ss = vec_splat_s16(5);
    const vector signed short v1ss = vec_splat_s16(1);
    const vector signed int v512si = (const vector signed int)AVV(512);
    const vector unsigned int v16ui = (const vector unsigned int)AVV(16);

    register int align = ((((unsigned long)src) - 2) % 16);

    src -= (2 * srcStride);

    for (i = 0 ; i < 21 ; i ++) {
        vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
        vector unsigned char srcR1 = vec_ld(-2, src);
        vector unsigned char srcR2 = vec_ld(14, src);

        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vector unsigned char srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vector unsigned char srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vector unsigned char srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vector unsigned char srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
        const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
        const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
        const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
        const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
        const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
        const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
        const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
        const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
        const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
        const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
        const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);

        const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
        const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
        const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
        const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
        const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
        const vector signed short sum3B = vec_adds(srcM2B, srcP3B);

        const vector signed short pp1A = vec_mladd(sum1A, v20ss, sum3A);
        const vector signed short pp1B = vec_mladd(sum1B, v20ss, sum3B);
        const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
        const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
        const vector signed short psumA = vec_sub(pp1A, pp2A);
        const vector signed short psumB = vec_sub(pp1B, pp2B);

        vec_st(psumA, 0, tmp);
        vec_st(psumB, 16, tmp);

        src += srcStride;
        tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
    }

    const vector unsigned char dstperm = vec_lvsr(0, dst);
    const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
    const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
    const vector unsigned char mperm = (const vector unsigned char)
        AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
            0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);

    int16_t *tmpbis = tmp - (tmpStride * 21);

    vector signed short tmpM2ssA = vec_ld(0, tmpbis);
    vector signed short tmpM2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    vector signed short tmpM1ssA = vec_ld(0, tmpbis);
    vector signed short tmpM1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    vector signed short tmpP0ssA = vec_ld(0, tmpbis);
    vector signed short tmpP0ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    vector signed short tmpP1ssA = vec_ld(0, tmpbis);
    vector signed short tmpP1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    vector signed short tmpP2ssA = vec_ld(0, tmpbis);
    vector signed short tmpP2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;

    for (i = 0 ; i < 16 ; i++) {
        const vector signed short tmpP3ssA = vec_ld(0, tmpbis);
        const vector signed short tmpP3ssB = vec_ld(16, tmpbis);
        tmpbis += tmpStride;

        const vector signed short sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
        const vector signed short sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
        const vector signed short sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
        const vector signed short sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
        const vector signed short sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
        const vector signed short sum3B = vec_adds(tmpM2ssB, tmpP3ssB);

        tmpM2ssA = tmpM1ssA;
        tmpM2ssB = tmpM1ssB;
        tmpM1ssA = tmpP0ssA;
        tmpM1ssB = tmpP0ssB;
        tmpP0ssA = tmpP1ssA;
        tmpP0ssB = tmpP1ssB;
        tmpP1ssA = tmpP2ssA;
        tmpP1ssB = tmpP2ssB;
        tmpP2ssA = tmpP3ssA;
        tmpP2ssB = tmpP3ssB;

        const vector signed int pp1Ae = vec_mule(sum1A, v20ss);
        const vector signed int pp1Ao = vec_mulo(sum1A, v20ss);
        const vector signed int pp1Be = vec_mule(sum1B, v20ss);
        const vector signed int pp1Bo = vec_mulo(sum1B, v20ss);

        const vector signed int pp2Ae = vec_mule(sum2A, v5ss);
        const vector signed int pp2Ao = vec_mulo(sum2A, v5ss);
        const vector signed int pp2Be = vec_mule(sum2B, v5ss);
        const vector signed int pp2Bo = vec_mulo(sum2B, v5ss);

        const vector signed int pp3Ae = vec_sra((vector signed int)sum3A, v16ui);
        const vector signed int pp3Ao = vec_mulo(sum3A, v1ss);
        const vector signed int pp3Be = vec_sra((vector signed int)sum3B, v16ui);
        const vector signed int pp3Bo = vec_mulo(sum3B, v1ss);

        const vector signed int pp1cAe = vec_add(pp1Ae, v512si);
        const vector signed int pp1cAo = vec_add(pp1Ao, v512si);
        const vector signed int pp1cBe = vec_add(pp1Be, v512si);
        const vector signed int pp1cBo = vec_add(pp1Bo, v512si);

        const vector signed int pp32Ae = vec_sub(pp3Ae, pp2Ae);
        const vector signed int pp32Ao = vec_sub(pp3Ao, pp2Ao);
        const vector signed int pp32Be = vec_sub(pp3Be, pp2Be);
        const vector signed int pp32Bo = vec_sub(pp3Bo, pp2Bo);

        const vector signed int sumAe = vec_add(pp1cAe, pp32Ae);
        const vector signed int sumAo = vec_add(pp1cAo, pp32Ao);
        const vector signed int sumBe = vec_add(pp1cBe, pp32Be);
        const vector signed int sumBo = vec_add(pp1cBo, pp32Bo);

        const vector signed int ssumAe = vec_sra(sumAe, v10ui);
        const vector signed int ssumAo = vec_sra(sumAo, v10ui);
        const vector signed int ssumBe = vec_sra(sumBe, v10ui);
        const vector signed int ssumBo = vec_sra(sumBo, v10ui);

        const vector signed short ssume = vec_packs(ssumAe, ssumBe);
        const vector signed short ssumo = vec_packs(ssumAo, ssumBo);

        const vector unsigned char sumv = vec_packsu(ssume, ssumo);
        const vector unsigned char sum = vec_perm(sumv, sumv, mperm);

        const vector unsigned char dst1 = vec_ld(0, dst);
        const vector unsigned char dst2 = vec_ld(16, dst);
        const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));

        vector unsigned char fsum;
        OP_U8_ALTIVEC(fsum, sum, vdst);

        const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
        const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
        const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);

        vec_st(fdst1, 0, dst);
        vec_st(fdst2, 16, dst);

        dst += dstStride;
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
}
/* this code assumes that stride % 16 == 0 */
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                    int stride, int h, int x, int y)
{
    POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
    POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);

    signed int ABCD[4] __attribute__((aligned(16)));
    register int i;
    ABCD[0] = ((8 - x) * (8 - y));
    ABCD[1] = ((    x) * (8 - y));
    ABCD[2] = ((8 - x) * (    y));
    ABCD[3] = ((    x) * (    y));
    const vector signed int vABCD = vec_ld(0, ABCD);
    const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
    const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
    const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
    const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
    const vector signed int vzero = vec_splat_s32(0);
    const vector signed short v32ss = (const vector signed short)AVV(32);
    const vector unsigned short v6us = vec_splat_u16(6);

    vector unsigned char fperm;
    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
                                          0x14, 0x15, 0x16, 0x17,
                                          0x08, 0x09, 0x0A, 0x0B,
                                          0x0C, 0x0D, 0x0E, 0x0F);
    } else {
        fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
                                          0x04, 0x05, 0x06, 0x07,
                                          0x18, 0x19, 0x1A, 0x1B,
                                          0x1C, 0x1D, 0x1E, 0x1F);
    }

    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vector unsigned char vsrcAuc;
    vector unsigned char vsrcBuc;
    vector unsigned char vsrcperm0;
    vector unsigned char vsrcperm1;
    vsrcAuc = vec_ld(0, src);
    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vector unsigned char vsrc0uc;
    vector unsigned char vsrc1uc;
    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vector signed short vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                                   (vector unsigned char)vsrc0uc);
    vector signed short vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                                   (vector unsigned char)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
        for (i = 0 ; i < h ; i++) {
            vector unsigned char vsrcCuc;
            vsrcCuc = vec_ld(stride + 0, src);

            vector unsigned char vsrc2uc;
            vector unsigned char vsrc3uc;
            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

            vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                                           (vector unsigned char)vsrc2uc);
            vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                                           (vector unsigned char)vsrc3uc);

            vector signed short psum;
            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v32ss, psum);
            psum = vec_sra(psum, v6us);

            vector unsigned char vdst = vec_ld(0, dst);
            vector unsigned char ppsum = (vector unsigned char)vec_packsu(psum, psum);
            vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);

            vector unsigned char fsum;
            OP_U8_ALTIVEC(fsum, vfdst, vdst);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    } else {
        for (i = 0 ; i < h ; i++) {
            vector unsigned char vsrcCuc;
            vector unsigned char vsrcDuc;
            vsrcCuc = vec_ld(stride +  0, src);
            vsrcDuc = vec_ld(stride + 16, src);

            vector unsigned char vsrc2uc;
            vector unsigned char vsrc3uc;
            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
            if (reallyBadAlign)
                vsrc3uc = vsrcDuc;
            else
                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

            vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                                           (vector unsigned char)vsrc2uc);
            vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                                           (vector unsigned char)vsrc3uc);

            vector signed short psum;
            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v32ss, psum);
            psum = vec_sr(psum, v6us);

            vector unsigned char vdst = vec_ld(0, dst);
            vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum);
            vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);

            vector unsigned char fsum;
            OP_U8_ALTIVEC(fsum, vfdst, vdst);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
}
/* this code assumes stride % 16 == 0 */
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src,
                                                 int dstStride, int srcStride)
{
    POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
    POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
    register int i;

    const vector signed int vzero = vec_splat_s32(0);
    const vector unsigned char permM2 = vec_lvsl(-2, src);
    const vector unsigned char permM1 = vec_lvsl(-1, src);
    const vector unsigned char permP0 = vec_lvsl(+0, src);
    const vector unsigned char permP1 = vec_lvsl(+1, src);
    const vector unsigned char permP2 = vec_lvsl(+2, src);
    const vector unsigned char permP3 = vec_lvsl(+3, src);
    const vector signed short v20ss = (const vector signed short)AVV(20);
    const vector unsigned short v5us = vec_splat_u16(5);
    const vector signed short v5ss = vec_splat_s16(5);
    const vector signed short v16ss = (const vector signed short)AVV(16);
    const vector unsigned char dstperm = vec_lvsr(0, dst);
    const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
    const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);

    register int align = ((((unsigned long)src) - 2) % 16);

    for (i = 0 ; i < 16 ; i ++) {
        vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
        vector unsigned char srcR1 = vec_ld(-2, src);
        vector unsigned char srcR2 = vec_ld(14, src);

        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vector unsigned char srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vector unsigned char srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vector unsigned char srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vector unsigned char srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
        const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
        const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
        const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
        const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
        const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
        const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
        const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
        const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
        const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
        const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
        const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);

        const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
        const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
        const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
        const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
        const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
        const vector signed short sum3B = vec_adds(srcM2B, srcP3B);

        const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
        const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);
        const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
        const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
        const vector signed short pp3A = vec_add(sum3A, pp1A);
        const vector signed short pp3B = vec_add(sum3B, pp1B);
        const vector signed short psumA = vec_sub(pp3A, pp2A);
        const vector signed short psumB = vec_sub(pp3B, pp2B);
        const vector signed short sumA = vec_sra(psumA, v5us);
        const vector signed short sumB = vec_sra(psumB, v5us);

        const vector unsigned char sum = vec_packsu(sumA, sumB);
        const vector unsigned char dst1 = vec_ld(0, dst);
        const vector unsigned char dst2 = vec_ld(16, dst);
        const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));

        vector unsigned char fsum;
        OP_U8_ALTIVEC(fsum, sum, vdst);

        const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
        const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
        const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);

        vec_st(fdst1, 0, dst);
        vec_st(fdst2, 16, dst);

        src += srcStride;
        dst += dstStride;
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
}
int field_dct_best_altivec(FIELD_DCT_BEST_PDECL)
{
    /*
     * calculate prediction error (cur-pred) for top (blk0)
     * and bottom field (blk1)
     */
    double r, d;
    int sumtop, sumbot, sumsqtop, sumsqbot, sumbottop;
    int topvar, botvar;
    int whichdct;

    int i;
    vector unsigned char ct, pt, cb, pb;
    vector unsigned char *ctp, *ptp, *cbp, *pbp;
    unsigned int offset, stride2;
    vector signed short cur, pred;
    vector signed short dift, difb;
    vector signed int vsumtop, vsumbot, vsumsqtop, vsumsqbot, vsumbottop;
    vector signed int t0, t1, t2, t3;
    vector signed int zero;
    union {
        vector signed int v;
        struct {
            signed int top;
            signed int bot;
            signed int sqtop;
            signed int sqbot;
        } sum;
        struct {
            signed int pad[3];
            signed int sum;
        } bottop;
    } vo;

    AMBER_START;

#ifdef ALTIVEC_VERIFY
    if (NOT_VECTOR_ALIGNED(cur_lum_mb))
        mjpeg_error_exit1("field_dct_best: cur_lum_mb %% 16 != 0, (%d)\n", cur_lum_mb);
    if (NOT_VECTOR_ALIGNED(pred_lum_mb))
        mjpeg_error_exit1("field_dct_best: pred_lum_mb %% 16 != 0, (%d)\n", pred_lum_mb);
    if (NOT_VECTOR_ALIGNED(stride))
        mjpeg_error_exit1("field_dct_best: stride %% 16 != 0, (%d)\n", stride);
#endif

    zero = vec_splat_s32(0);
    vsumtop = vec_splat_s32(0);
    vsumbot = vec_splat_s32(0);
    vsumsqtop = vec_splat_s32(0);
    vsumsqbot = vec_splat_s32(0);
    vsumbottop = vec_splat_s32(0);

    ctp = (vector unsigned char*) cur_lum_mb;
    ptp = (vector unsigned char*) pred_lum_mb;
    cbp = (vector unsigned char*)(cur_lum_mb + stride);
    pbp = (vector unsigned char*)(pred_lum_mb + stride);
    offset = 0;
    stride2 = stride << 1;

#if 1
    ct = vec_ld(offset, ctp);
    pt = vec_ld(offset, ptp);
    cb = vec_ld(offset, cbp);
    pb = vec_ld(offset, pbp);

    i = 16/2 - 1;
    do {
        cur = (vector signed short)vec_mergeh(vu8(zero), ct);
        pred = (vector signed short)vec_mergeh(vu8(zero), pt);
        dift = vec_sub(cur, pred);

        cur = (vector signed short)vec_mergeh(vu8(zero), cb);
        pred = (vector signed short)vec_mergeh(vu8(zero), pb);
        difb = vec_sub(cur, pred);

        vsumtop = vec_sum4s(dift, vsumtop);
        vsumbot = vec_sum4s(difb, vsumbot);
        vsumsqtop = vec_msum(dift, dift, vsumsqtop);
        vsumsqbot = vec_msum(difb, difb, vsumsqbot);
        vsumbottop = vec_msum(dift, difb, vsumbottop);

        cur = (vector signed short)vec_mergel(vu8(zero), ct);
        pred = (vector signed short)vec_mergel(vu8(zero), pt);
        dift = vec_sub(cur, pred);

        cur = (vector signed short)vec_mergel(vu8(zero), cb);
        pred = (vector signed short)vec_mergel(vu8(zero), pb);
        difb = vec_sub(cur, pred);

        offset += stride2;
        ct = vec_ld(offset, ctp);
        pt = vec_ld(offset, ptp);
        cb = vec_ld(offset, cbp);
        pb = vec_ld(offset, pbp);

        vsumtop = vec_sum4s(dift, vsumtop);
        vsumbot = vec_sum4s(difb, vsumbot);
        vsumsqtop = vec_msum(dift, dift, vsumsqtop);
        vsumsqbot = vec_msum(difb, difb, vsumsqbot);
        vsumbottop = vec_msum(dift, difb, vsumbottop);
    } while (--i);

    cur = (vector signed short)vec_mergeh(vu8(zero), ct);
    pred = (vector signed short)vec_mergeh(vu8(zero), pt);
    dift = vec_sub(cur, pred);

    cur = (vector signed short)vec_mergeh(vu8(zero), cb);
    pred = (vector signed short)vec_mergeh(vu8(zero), pb);
    difb = vec_sub(cur, pred);

    vsumtop = vec_sum4s(dift, vsumtop);
    vsumbot = vec_sum4s(difb, vsumbot);
    vsumsqtop = vec_msum(dift, dift, vsumsqtop);
    vsumsqbot = vec_msum(difb, difb, vsumsqbot);
    vsumbottop = vec_msum(dift, difb, vsumbottop);

    cur = (vector signed short)vec_mergel(vu8(zero), ct);
    pred = (vector signed short)vec_mergel(vu8(zero), pt);
    dift = vec_sub(cur, pred);

    cur = (vector signed short)vec_mergel(vu8(zero), cb);
    pred = (vector signed short)vec_mergel(vu8(zero), pb);
    difb = vec_sub(cur, pred);

    vsumtop = vec_sum4s(dift, vsumtop);
    vsumbot = vec_sum4s(difb, vsumbot);
    vsumsqtop = vec_msum(dift, dift, vsumsqtop);
    vsumsqbot = vec_msum(difb, difb, vsumsqbot);
    vsumbottop = vec_msum(dift, difb, vsumbottop);
#else
    for (i = 0; i < 16/2; i++) { /* {{{ */
        ct = vec_ld(offset, ctp);
        pt = vec_ld(offset, ptp);
        cb = vec_ld(offset, cbp);
        pb = vec_ld(offset, pbp);

        cur = (vector signed short)vec_mergeh(vu8(zero), ct);
        pred = (vector signed short)vec_mergeh(vu8(zero), pt);
        dift = vec_sub(cur, pred);

        cur = (vector signed short)vec_mergeh(vu8(zero), cb);
        pred = (vector signed short)vec_mergeh(vu8(zero), pb);
        difb = vec_sub(cur, pred);

        vsumtop = vec_sum4s(dift, vsumtop);
        vsumbot = vec_sum4s(difb, vsumbot);
        vsumsqtop = vec_msum(dift, dift, vsumsqtop);
        vsumsqbot = vec_msum(difb, difb, vsumsqbot);
        vsumbottop = vec_msum(dift, difb, vsumbottop);

        cur = (vector signed short)vec_mergel(vu8(zero), ct);
        pred = (vector signed short)vec_mergel(vu8(zero), pt);
        dift = vec_sub(cur, pred);

        cur = (vector signed short)vec_mergel(vu8(zero), cb);
        pred = (vector signed short)vec_mergel(vu8(zero), pb);
        difb = vec_sub(cur, pred);

        vsumtop = vec_sum4s(dift, vsumtop);
        vsumbot = vec_sum4s(difb, vsumbot);
        vsumsqtop = vec_msum(dift, dift, vsumsqtop);
        vsumsqbot = vec_msum(difb, difb, vsumsqbot);
        vsumbottop = vec_msum(dift, difb, vsumbottop);

        offset += stride2;
    } /* }}} */
#endif

    /* transpose [sumtop, sumbot, sumsqtop, sumsqbot] {{{ */
    t0 = vec_mergel(vsumtop, vsumsqtop);
    t1 = vec_mergeh(vsumtop, vsumsqtop);
    t2 = vec_mergel(vsumbot, vsumsqbot);
    t3 = vec_mergeh(vsumbot, vsumsqbot);
    vsumtop = vec_mergeh(t1, t3);
    vsumbot = vec_mergel(t1, t3);
    vsumsqtop = vec_mergeh(t0, t2);
    vsumsqbot = vec_mergel(t0, t2);
    /* }}} */

    /* sum final values for sumtop, sumbot, sumsqtop, sumsqbot */
    vsumtop = vec_add(vsumtop, vsumbot);
    vsumsqtop = vec_add(vsumsqtop, vsumsqbot);
    vo.v = vec_add(vsumtop, vsumsqtop);

    sumtop = vo.sum.top;
    sumbot = vo.sum.bot;
    sumsqtop = vo.sum.sqtop;
    sumsqbot = vo.sum.sqbot;

    vsumbottop = vec_sums(vsumbottop, zero);
    vo.v = vsumbottop;

    /* Calculate variances for the top and bottom fields. If they're of
       similar sign, estimate the correlation; if it's good, use frame DCT,
       otherwise use field DCT. */
    whichdct = FIELD_DCT;
    r = 0.0;
    topvar = sumsqtop - sumtop * sumtop / 128;
    botvar = sumsqbot - sumbot * sumbot / 128;
    if (!((topvar > 0) ^ (botvar > 0))) {
        sumbottop = vo.bottop.sum;

        d = ((double) topvar) * ((double) botvar);
        r = (sumbottop - (sumtop * sumbot) / 128);
        if (r > (0.5 * sqrt(d)))
            whichdct = FRAME_DCT;
    }

    AMBER_STOP;

    return whichdct;
}
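The decision at the tail is a correlation test in disguise: topvar and botvar are 128 times the per-field variances of the prediction error, sumbottop is 128 times the covariance, and r > 0.5*sqrt(d) is equivalent to a correlation coefficient above 0.5. A normalized sketch of the same test (function name hypothetical; it assumes math.h and the FRAME_DCT/FIELD_DCT constants from the surrounding headers, and unlike the original it only handles the common both-positive-variance case rather than the full same-sign check):

static int field_dct_decision_ref(int sumtop, int sumbot,
                                  int sumsqtop, int sumsqbot, int sumbottop)
{
    double var_top = sumsqtop - (double) sumtop * sumtop / 128; /* 128*Var */
    double var_bot = sumsqbot - (double) sumbot * sumbot / 128;
    double cov     = sumbottop - (double) sumtop * sumbot / 128; /* 128*Cov */

    if (var_top > 0 && var_bot > 0 &&
        cov > 0.5 * sqrt(var_top * var_bot))  /* correlation coeff > 0.5 */
        return FRAME_DCT;
    return FIELD_DCT;
}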
uint32_t
dev16_altivec_c(vector unsigned char *cur, uint32_t stride)
{
    vector unsigned char t2, t3, mn;
    vector unsigned int mean, dev;
    vector unsigned int sumdiffs;
    vector unsigned char *ptr;
    uint32_t result;

#ifdef DEBUG
    /* print alignment errors if DEBUG is on */
    if(((unsigned long)cur) & 0x7)
        fprintf(stderr, "dev16_altivec:incorrect align, cur: %lx\n", (long)cur);
    if(stride & 0xf)
        fprintf(stderr, "dev16_altivec:incorrect align, stride: %lu\n", stride);
#endif

    dev = mean = vec_splat_u32(0);
    stride >>= 4;

    /* set pointer to iterate through cur */
    ptr = cur;

    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();
    MEAN16();

    /* Add all together in sumdiffs */
    sumdiffs = (vector unsigned int)vec_sums((vector signed int) mean, vec_splat_s32(0));
    /* divide by 16 * 16 */
    mn = vec_perm((vector unsigned char)sumdiffs, (vector unsigned char)sumdiffs,
                  vec_splat_u8(14));

    /* set pointer to iterate through cur */
    ptr = cur;

    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();
    DEV16();

    /* sum all parts of difference into one 32 bit quantity */
    sumdiffs = (vector unsigned int)vec_sums((vector signed int) dev, vec_splat_s32(0));

    /* copy vector sum into unaligned result */
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, (uint32_t*) &result);
    return result;
}
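MEAN16()/DEV16() are defined elsewhere; the function computes the sum of absolute deviations from the block mean over a 16x16 block. Taking byte 14 of the big-endian 32-bit total is the same as (sum >> 8) & 0xFF, i.e. division by 16*16. A scalar sketch (hypothetical name; note the vector version pre-shifts the stride because it steps in whole vectors, while this sketch takes a plain byte stride):

static uint32_t dev16_ref(const uint8_t *cur, uint32_t stride)
{
    uint32_t sum = 0, dev = 0;

    for (int row = 0; row < 16; row++)
        for (int col = 0; col < 16; col++)
            sum += cur[row * stride + col];

    uint8_t mean = sum >> 8;   /* sum / (16*16), as byte 14 above */

    for (int row = 0; row < 16; row++)
        for (int col = 0; col < 16; col++) {
            int d = cur[row * stride + col] - mean;
            dev += (uint32_t)(d < 0 ? -d : d);
        }
    return dev;
}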
/** Do inverse transform on 8x8 block */
static void vc1_inv_trans_8x8_altivec(int16_t block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);

    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);

    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,  0, block);
    vec_st(src1, 16, block);
    vec_st(src2, 32, block);
    vec_st(src3, 48, block);
    vec_st(src4, 64, block);
    vec_st(src5, 80, block);
    vec_st(src6, 96, block);
    vec_st(src7,112, block);
}
static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src,
                                                  int dstStride, int tmpStride, int srcStride)
{
    register int i;
    LOAD_ZERO;
    const vec_u8 permM2 = vec_lvsl(-2, src);
    const vec_u8 permM1 = vec_lvsl(-1, src);
    const vec_u8 permP0 = vec_lvsl(+0, src);
    const vec_u8 permP1 = vec_lvsl(+1, src);
    const vec_u8 permP2 = vec_lvsl(+2, src);
    const vec_u8 permP3 = vec_lvsl(+3, src);
    const vec_s16 v20ss = vec_sl(vec_splat_s16(5), vec_splat_u16(2));
    const vec_u32 v10ui = vec_splat_u32(10);
    const vec_s16 v5ss = vec_splat_s16(5);
    const vec_s16 v1ss = vec_splat_s16(1);
    const vec_s32 v512si = vec_sl(vec_splat_s32(1), vec_splat_u32(9));
    const vec_u32 v16ui = vec_sl(vec_splat_u32(1), vec_splat_u32(4));

    register int align = ((((unsigned long)src) - 2) % 16);

    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
            srcP2A, srcP2B, srcP3A, srcP3B,
            srcM1A, srcM1B, srcM2A, srcM2B,
            sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
            pp1A, pp1B, pp2A, pp2B, psumA, psumB;

    const vec_u8 mperm = (const vec_u8)
        {0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
         0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F};
    int16_t *tmpbis = tmp;

    vec_s16 tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
            tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
            tmpP2ssA, tmpP2ssB;

    vec_s32 pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
            pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
            pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
            ssumAe, ssumAo, ssumBe, ssumBo;
    vec_u8 fsum, sumv, sum;
    vec_s16 ssume, ssumo;

    src -= (2 * srcStride);
    for (i = 0 ; i < 21 ; i ++) {
        vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
        vec_u8 srcR1 = vec_ld(-2, src);
        vec_u8 srcR2 = vec_ld(14, src);

        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
        srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
        srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
        srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);

        srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
        srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
        srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
        srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);

        srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
        srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
        srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
        srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);

        sum1A = vec_adds(srcP0A, srcP1A);
        sum1B = vec_adds(srcP0B, srcP1B);
        sum2A = vec_adds(srcM1A, srcP2A);
        sum2B = vec_adds(srcM1B, srcP2B);
        sum3A = vec_adds(srcM2A, srcP3A);
        sum3B = vec_adds(srcM2B, srcP3B);

        pp1A = vec_mladd(sum1A, v20ss, sum3A);
        pp1B = vec_mladd(sum1B, v20ss, sum3B);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        psumA = vec_sub(pp1A, pp2A);
        psumB = vec_sub(pp1B, pp2B);

        vec_st(psumA, 0, tmp);
        vec_st(psumB, 16, tmp);

        src += srcStride;
        tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
    }

    tmpM2ssA = vec_ld(0, tmpbis);
    tmpM2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpM1ssA = vec_ld(0, tmpbis);
    tmpM1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP0ssA = vec_ld(0, tmpbis);
    tmpP0ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP1ssA = vec_ld(0, tmpbis);
    tmpP1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP2ssA = vec_ld(0, tmpbis);
    tmpP2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;

    for (i = 0 ; i < 16 ; i++) {
        const vec_s16 tmpP3ssA = vec_ld(0, tmpbis);
        const vec_s16 tmpP3ssB = vec_ld(16, tmpbis);

        const vec_s16 sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
        const vec_s16 sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
        const vec_s16 sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
        const vec_s16 sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
        const vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
        const vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);

        tmpbis += tmpStride;

        tmpM2ssA = tmpM1ssA;
        tmpM2ssB = tmpM1ssB;
        tmpM1ssA = tmpP0ssA;
        tmpM1ssB = tmpP0ssB;
        tmpP0ssA = tmpP1ssA;
        tmpP0ssB = tmpP1ssB;
        tmpP1ssA = tmpP2ssA;
        tmpP1ssB = tmpP2ssB;
        tmpP2ssA = tmpP3ssA;
        tmpP2ssB = tmpP3ssB;

        pp1Ae = vec_mule(sum1A, v20ss);
        pp1Ao = vec_mulo(sum1A, v20ss);
        pp1Be = vec_mule(sum1B, v20ss);
        pp1Bo = vec_mulo(sum1B, v20ss);

        pp2Ae = vec_mule(sum2A, v5ss);
        pp2Ao = vec_mulo(sum2A, v5ss);
        pp2Be = vec_mule(sum2B, v5ss);
        pp2Bo = vec_mulo(sum2B, v5ss);

        pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
        pp3Ao = vec_mulo(sum3A, v1ss);
        pp3Be = vec_sra((vec_s32)sum3B, v16ui);
        pp3Bo = vec_mulo(sum3B, v1ss);

        pp1cAe = vec_add(pp1Ae, v512si);
        pp1cAo = vec_add(pp1Ao, v512si);
        pp1cBe = vec_add(pp1Be, v512si);
        pp1cBo = vec_add(pp1Bo, v512si);

        pp32Ae = vec_sub(pp3Ae, pp2Ae);
        pp32Ao = vec_sub(pp3Ao, pp2Ao);
        pp32Be = vec_sub(pp3Be, pp2Be);
        pp32Bo = vec_sub(pp3Bo, pp2Bo);

        sumAe = vec_add(pp1cAe, pp32Ae);
        sumAo = vec_add(pp1cAo, pp32Ao);
        sumBe = vec_add(pp1cBe, pp32Be);
        sumBo = vec_add(pp1cBo, pp32Bo);

        ssumAe = vec_sra(sumAe, v10ui);
        ssumAo = vec_sra(sumAo, v10ui);
        ssumBe = vec_sra(sumBe, v10ui);
        ssumBo = vec_sra(sumBo, v10ui);

        ssume = vec_packs(ssumAe, ssumBe);
        ssumo = vec_packs(ssumAo, ssumBo);

        sumv = vec_packsu(ssume, ssumo);
        sum = vec_perm(sumv, sumv, mperm);

        ASSERT_ALIGNED(dst);

        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));

        vec_st(fsum, 0, dst);

        dst += dstStride;
    }
}
void iquant_intra_m2_altivec(IQUANT_INTRA_PDECL)
{
    int i;
    vector signed short vsrc;
    uint16_t *qmat;
    vector unsigned short vqmat;
    vector unsigned short vmquant;
    vector bool short ltzero;
    vector signed short val, t0;
    vector signed short zero;
    vector unsigned int four;
    vector signed short min, max;
    vector signed int vsum;
    int sum;
    int offset, offset2;
    int16_t dst0;
    union {
        vector unsigned short vu16;
        unsigned short mquant;
        vector signed int vs32;
        struct {
            signed int pad[3];
            signed int sum;
        } s;
    } vu;
#ifdef ALTIVEC_DST
    DataStreamControl dsc;
#endif

#ifdef ALTIVEC_VERIFY /* {{{ */
    if (NOT_VECTOR_ALIGNED(wsp->intra_q_mat))
        mjpeg_error_exit1("iquant_intra_m2: wsp->intra_q_mat %% 16 != 0, (%d)",
                          wsp->intra_q_mat);
    if (NOT_VECTOR_ALIGNED(src))
        mjpeg_error_exit1("iquant_intra_m2: src %% 16 != 0, (%d)", src);
    if (NOT_VECTOR_ALIGNED(dst))
        mjpeg_error_exit1("iquant_intra_m2: dst %% 16 != 0, (%d)", dst);
    for (i = 0; i < 64; i++)
        if (src[i] < -256 || src[i] > 255)
            mjpeg_error_exit1("iquant_intra_m2: -256 > src[%i] > 255, (%d)",
                              i, src[i]);
#endif /* }}} */

    AMBER_START;

    dst0 = src[0] << (3 - dc_prec);

    qmat = (uint16_t*)wsp->intra_q_mat;

#ifdef ALTIVEC_DST
    dsc.control = DATA_STREAM_CONTROL(64/8,1,0);
    vec_dst(src, dsc.control, 0);
    vec_dst(qmat, dsc.control, 1);
#endif

    /* vmquant = (vector unsigned short)(mquant); */
    vu.mquant = (unsigned short)mquant;
    vmquant = vec_splat(vu.vu16, 0);

    vsum = vec_splat_s32(0);
    zero = vec_splat_s16(0);
    four = vec_splat_u32(4);

    /* max = (2047); min = (-2048); {{{ */
    vu8(max) = vec_splat_u8(0x7);
    t0 = vec_splat_s16(-1); /* 0xffff */
    vu8(max) = vec_mergeh(vu8(max), vu8(t0)); /* 0x07ff == 2047 */
    min = vec_sub(t0, max);
    /* }}} */

    offset = 0;

#if 1
    vsrc = vec_ld(offset, (signed short*)src);
    vqmat = vec_ld(offset, (unsigned short*)qmat);
    i = (64/8) - 1;
    do {
        /* intra_q[i] * mquant */
        vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

        /* save sign */
        ltzero = vec_cmplt(vsrc, zero);

        /* val = abs(src) */
        t0 = vec_sub(zero, vsrc);
        val = vec_max(t0, vsrc);

        /* val = (src * quant) >> 4 */
        vs32(t0) = vec_mule(val, vs16(vqmat));
        vs32(val) = vec_mulo(val, vs16(vqmat));
        vs32(t0) = vec_sra(vs32(t0), four);
        vs16(t0) = vec_pack(vs32(t0), vs32(t0));
        vs32(val) = vec_sra(vs32(val), four);
        vs16(val) = vec_pack(vs32(val), vs32(val));
        val = vec_mergeh(vs16(t0), vs16(val));

        offset2 = offset;
        offset += 8*sizeof(int16_t);
        vsrc = vec_ld(offset, (signed short*)src);
        vqmat = vec_ld(offset, (unsigned short*)qmat);

        /* restore sign */
        t0 = vec_sub(zero, val);
        val = vec_sel(val, t0, ltzero);

        /* val = (val > 2047) ? 2047 : ((val < -2048) ? -2048 : val); */
        val = vec_min(val, max);
        val = vec_max(val, min);

        vsum = vec_sum4s(val, vsum);

        vec_st(val, offset2, dst);
    } while (--i);

    /* intra_q[i] * mquant */
    vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

    /* save sign */
    ltzero = vec_cmplt(vsrc, zero);

    /* val = abs(src) */
    t0 = vec_sub(zero, vsrc);
    val = vec_max(t0, vsrc);

    /* val = (src * quant) >> 4 */
    vs32(t0) = vec_mule(val, vs16(vqmat));
    vs32(val) = vec_mulo(val, vs16(vqmat));
    vs32(t0) = vec_sra(vs32(t0), four);
    vs16(t0) = vec_pack(vs32(t0), vs32(t0));
    vs32(val) = vec_sra(vs32(val), four);
    vs16(val) = vec_pack(vs32(val), vs32(val));
    val = vec_mergeh(vs16(t0), vs16(val));

    /* restore sign */
    t0 = vec_sub(zero, val);
    val = vec_sel(val, t0, ltzero);

    /* val = (val > 2047) ? 2047 : ((val < -2048) ? -2048 : val); */
    val = vec_min(val, max);
    val = vec_max(val, min);

    vsum = vec_sum4s(val, vsum);

    vec_st(val, offset, dst);
#else /* {{{ */
    i = (64/8);
    do {
        vsrc = vec_ld(offset, (signed short*)src);
        vqmat = vec_ld(offset, (unsigned short*)qmat);

        /* intra_q[i] * mquant */
        vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

        /* save sign */
        ltzero = vec_cmplt(vsrc, zero);

        /* val = abs(src) */
        t0 = vec_sub(zero, vsrc);
        val = vec_max(t0, vsrc);

        /* val = (src * quant) >> 4 */
        vs32(t0) = vec_mule(val, vs16(vqmat));
        vs32(val) = vec_mulo(val, vs16(vqmat));
        vs32(t0) = vec_sra(vs32(t0), four);
        vs16(t0) = vec_pack(vs32(t0), vs32(t0));
        vs32(val) = vec_sra(vs32(val), four);
        vs16(val) = vec_pack(vs32(val), vs32(val));
        val = vec_mergeh(vs16(t0), vs16(val));

        /* restore sign */
        t0 = vec_sub(zero, val);
        val = vec_sel(val, t0, ltzero);

        /* val = (val > 2047) ? 2047 : ((val < -2048) ? -2048 : val); */
        val = vec_min(val, max);
        val = vec_max(val, min);

        vsum = vec_sum4s(val, vsum);

        vec_st(val, offset, dst);

        offset += 8*sizeof(int16_t);
    } while (--i);
#endif /* }}} */

    vu.vs32 = vec_sums(vsum, vs32(zero));
    sum = vu.s.sum;
    sum -= dst[0];
    sum += dst0;
    dst[0] = dst0;

    /* mismatch control */
#if 1
    dst[63] ^= !(sum & 1);
#else
    if ((sum & 1) == 0)
        dst[63] ^= 1;
#endif

    AMBER_STOP;
}
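The oddification at the end implements MPEG-2 mismatch control: the sum of all 64 reconstructed coefficients must be odd, and the vector code tracks that sum with vec_sum4s/vec_sums (adjusting for the replaced DC term). A scalar sketch of the same step, under a hypothetical name:

/* Scalar sketch of the mismatch control applied above: toggle the LSB
 * of the last coefficient whenever the coefficient sum comes out even. */
static void mismatch_control_ref(int16_t dst[64])
{
    int sum = 0;
    for (int i = 0; i < 64; i++)
        sum += dst[i];
    if ((sum & 1) == 0)
        dst[63] ^= 1;
}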