Example #1
static void yuv2plane1_8_vsx(const int16_t *src, uint8_t *dest, int dstW,
                             const uint8_t *dither, int offset)
{
    const int dst_u = -(uintptr_t)dest & 15;
    int i, j;
    LOCAL_ALIGNED(16, int16_t, val, [16]);
    const vector uint16_t shifts = (vector uint16_t) {7, 7, 7, 7, 7, 7, 7, 7};
    vector int16_t vi, vileft, ditherleft, ditherright;
    vector uint8_t vd;

    for (j = 0; j < 16; j++) {
        val[j] = dither[(dst_u + offset + j) & 7];
    }

    ditherleft = vec_ld(0, val);
    ditherright = vec_ld(0, &val[8]);

    yuv2plane1_8_u(src, dest, dst_u, dither, offset, 0);

    for (i = dst_u; i < dstW - 15; i += 16) {

        vi = vec_vsx_ld(0, &src[i]);
        vi = vec_adds(ditherleft, vi);
        vileft = vec_sra(vi, shifts);

        vi = vec_vsx_ld(0, &src[i + 8]);
        vi = vec_adds(ditherright, vi);
        vi = vec_sra(vi, shifts);

        vd = vec_packsu(vileft, vi);
        vec_st(vd, 0, &dest[i]);
    }

    yuv2plane1_8_u(src, dest, dstW, dither, offset, i);
}
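The VSX loop only covers the 16-byte-aligned middle of the row; the unaligned head and tail are delegated to the scalar helper yuv2plane1_8_u. A minimal sketch of that helper, inferred from the loop's arithmetic (av_clip_uint8 is libavutil's clamp; take the exact signature as an assumption):

static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset, int start)
{
    /* scalar fallback: add the 8-entry dither, shift down by 7, clamp to 0..255 */
    for (int i = start; i < dstW; i++)
        dest[i] = av_clip_uint8((src[i] + dither[(i + offset) & 7]) >> 7);
}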
Example #2
static void predict_16x16_p_altivec( uint8_t *src )
{
    int16_t a, b, c, i;
    int H = 0;
    int V = 0;
    int16_t i00;

    for( i = 1; i <= 8; i++ )
    {
        H += i * ( src[7+i - FDEC_STRIDE ]  - src[7-i - FDEC_STRIDE ] );
        V += i * ( src[(7+i)*FDEC_STRIDE -1] - src[(7-i)*FDEC_STRIDE -1] );
    }

    a = 16 * ( src[15*FDEC_STRIDE -1] + src[15 - FDEC_STRIDE] );
    b = ( 5 * H + 32 ) >> 6;
    c = ( 5 * V + 32 ) >> 6;
    i00 = a - b * 7 - c * 7 + 16;

    vect_sshort_u i00_u, b_u, c_u;
    i00_u.s[0] = i00;
    b_u.s[0]   = b;
    c_u.s[0]   = c;

    vec_u16_t val5_v = vec_splat_u16(5);
    vec_s16_t i00_v, b_v, c_v;
    i00_v = vec_splat(i00_u.v, 0);
    b_v = vec_splat(b_u.v, 0);
    c_v = vec_splat(c_u.v, 0);
    vec_s16_t induc_v  = (vec_s16_t) CV(0,  1,  2,  3,  4,  5,  6,  7);
    vec_s16_t b8_v = vec_sl(b_v, vec_splat_u16(3));
    vec_s32_t mule_b_v = vec_mule(induc_v, b_v);
    vec_s32_t mulo_b_v = vec_mulo(induc_v, b_v);
    vec_s16_t mul_b_induc0_v = vec_pack(vec_mergeh(mule_b_v, mulo_b_v), vec_mergel(mule_b_v, mulo_b_v));
    vec_s16_t add_i0_b_0v = vec_adds(i00_v, mul_b_induc0_v);
    vec_s16_t add_i0_b_8v = vec_adds(b8_v, add_i0_b_0v);

    int y;

    for( y = 0; y < 16; y++ )
    {
        vec_s16_t shift_0_v = vec_sra(add_i0_b_0v, val5_v);
        vec_s16_t shift_8_v = vec_sra(add_i0_b_8v, val5_v);
        vec_u8_t com_sat_v = vec_packsu(shift_0_v, shift_8_v);
        vec_st( com_sat_v, 0, &src[0]);
        src += FDEC_STRIDE;
        add_i0_b_0v = vec_adds(add_i0_b_0v, c_v);
        add_i0_b_8v = vec_adds(add_i0_b_8v, c_v);
    }
}
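For reference, the same planar prediction as plain C (a sketch mirroring x264's scalar reference; FDEC_STRIDE as above, clamping by hand where the vector code relies on vec_packsu saturation):

static void predict_16x16_p_sketch(uint8_t *src)
{
    int H = 0, V = 0;
    for (int i = 1; i <= 8; i++) {
        H += i * (src[7 + i - FDEC_STRIDE] - src[7 - i - FDEC_STRIDE]);
        V += i * (src[(7 + i) * FDEC_STRIDE - 1] - src[(7 - i) * FDEC_STRIDE - 1]);
    }
    int a = 16 * (src[15 * FDEC_STRIDE - 1] + src[15 - FDEC_STRIDE]);
    int b = (5 * H + 32) >> 6;
    int c = (5 * V + 32) >> 6;
    int i00 = a - 7 * b - 7 * c + 16;
    for (int y = 0; y < 16; y++) {
        int pix = i00;
        for (int x = 0; x < 16; x++) {
            int v = pix >> 5;
            src[x] = v < 0 ? 0 : (v > 255 ? 255 : v);
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}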
Example #3
/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
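Per output pixel, the loop computes the classic no-rounding half-pel average; keeping the previous row's pairwise sum in pixelssum1 (with the +1 from vcone already folded in) means each source line is loaded only once:

/* dst[i] = (src[i] + src[i+1] + src[i+stride] + src[i+stride+1] + 1) >> 2 */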
Example #4
static void yuv2planeX_16_altivec(const int16_t *filter, int filterSize,
                                  const int16_t **src, uint8_t *dest,
                                  const uint8_t *dither, int offset, int x)
{
    register int i, j;
    DECLARE_ALIGNED(16, int, val)[16];
    vector signed int vo1, vo2, vo3, vo4;
    vector unsigned short vs1, vs2;
    vector unsigned char vf;
    vector unsigned int altivec_vectorShiftInt19 =
        vec_add(vec_splat_u32(10), vec_splat_u32(9));

    for (i = 0; i < 16; i++)
        val[i] = dither[(x + i + offset) & 7] << 12;

    vo1 = vec_ld(0,  val);
    vo2 = vec_ld(16, val);
    vo3 = vec_ld(32, val);
    vo4 = vec_ld(48, val);

    for (j = 0; j < filterSize; j++) {
        vector signed short l1, vLumFilter = vec_ld(j << 1, filter);
        vector unsigned char perm, perm0 = vec_lvsl(j << 1, filter);
        vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
        vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter

        perm = vec_lvsl(x << 1, src[j]);
        l1   = vec_ld(x << 1, src[j]);

        yuv2planeX_8(vo1, vo2, l1, src[j], x,     perm, vLumFilter);
        yuv2planeX_8(vo3, vo4, l1, src[j], x + 8, perm, vLumFilter);
    }

    vo1 = vec_sra(vo1, altivec_vectorShiftInt19);
    vo2 = vec_sra(vo2, altivec_vectorShiftInt19);
    vo3 = vec_sra(vo3, altivec_vectorShiftInt19);
    vo4 = vec_sra(vo4, altivec_vectorShiftInt19);
    vs1 = vec_packsu(vo1, vo2);
    vs2 = vec_packsu(vo3, vo4);
    vf  = vec_packsu(vs1, vs2);
    vec_st(vf, 0, dest);
}
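The odd-looking construction of the shift count is deliberate: vec_splat_u32 takes a 5-bit signed immediate (-16..15), so 19 has to be assembled from two in-range splats:

/* 19 is out of vec_splat_u32's immediate range, hence 10 + 9 */
vector unsigned int shift19 = vec_add(vec_splat_u32(10), vec_splat_u32(9));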
Example #5
static void predict_16x16_p_altivec( uint8_t *src )
{
    int H = 0, V = 0;

    for( int i = 1; i <= 8; i++ )
    {
        H += i * ( src[7+i - FDEC_STRIDE ]  - src[7-i - FDEC_STRIDE ] );
        V += i * ( src[(7+i)*FDEC_STRIDE -1] - src[(7-i)*FDEC_STRIDE -1] );
    }

    int a = 16 * ( src[15*FDEC_STRIDE -1] + src[15 - FDEC_STRIDE] );
    int b = ( 5 * H + 32 ) >> 6;
    int c = ( 5 * V + 32 ) >> 6;
    int i00 = a - b * 7 - c * 7 + 16;

    vec_s16_u i00_u, b_u, c_u;
    i00_u.s[0] = i00;
    b_u.s[0]   = b;
    c_u.s[0]   = c;

    vec_u16_t val5_v = vec_splat_u16(5);
    vec_s16_t i00_v, b_v, c_v;
    i00_v = vec_splat(i00_u.v, 0);
    b_v = vec_splat(b_u.v, 0);
    c_v = vec_splat(c_u.v, 0);
    vec_s16_t induc_v  = (vec_s16_t) CV(0,  1,  2,  3,  4,  5,  6,  7);
    vec_s16_t b8_v = vec_sl(b_v, vec_splat_u16(3));
    vec_s16_t add_i0_b_0v = vec_mladd(induc_v, b_v, i00_v);
    vec_s16_t add_i0_b_8v = vec_adds(b8_v, add_i0_b_0v);

    for( int y = 0; y < 16; y++ )
    {
        vec_s16_t shift_0_v = vec_sra(add_i0_b_0v, val5_v);
        vec_s16_t shift_8_v = vec_sra(add_i0_b_8v, val5_v);
        vec_u8_t com_sat_v = vec_packsu(shift_0_v, shift_8_v);
        vec_st( com_sat_v, 0, &src[0]);
        src += FDEC_STRIDE;
        add_i0_b_0v = vec_adds(add_i0_b_0v, c_v);
        add_i0_b_8v = vec_adds(add_i0_b_8v, c_v);
    }
}
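Compared with Example #2, the induction row is now produced by a single fused multiply-add instead of the even/odd multiply and repack sequence:

/* vec_mladd(a, b, c) returns the low 16 bits of a*b + c per element, so
 *     add_i0_b_0v = vec_mladd(induc_v, b_v, i00_v);
 * replaces the vec_mule/vec_mulo + vec_mergeh/vec_mergel + vec_pack dance. */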
Example #6
static void yuv2planeX_16_altivec(const int16_t *filter, int filterSize,
                                  const int16_t **src, uint8_t *dest,
                                  const uint8_t *dither, int offset, int x)
{
    register int i, j;
    LOCAL_ALIGNED(16, int, val, [16]);
    vector signed int vo1, vo2, vo3, vo4;
    vector unsigned short vs1, vs2;
    vector unsigned char vf;
    vector unsigned int altivec_vectorShiftInt19 =
        vec_add(vec_splat_u32(10), vec_splat_u32(9));

    for (i = 0; i < 16; i++)
        val[i] = dither[(x + i + offset) & 7] << 12;

    vo1 = vec_ld(0,  val);
    vo2 = vec_ld(16, val);
    vo3 = vec_ld(32, val);
    vo4 = vec_ld(48, val);

    for (j = 0; j < filterSize; j++) {
        unsigned int joffset=j<<1;
        unsigned int xoffset=x<<1;
        vector unsigned char perm;
        vector signed short l1,vLumFilter;
        LOAD_FILTER(vLumFilter,filter);
        vLumFilter = vec_splat(vLumFilter, 0);
        LOAD_L1(l1,src[j],perm);
        yuv2planeX_8(vo1, vo2, l1, src[j], x,     perm, vLumFilter);
        yuv2planeX_8(vo3, vo4, l1, src[j], x + 8, perm, vLumFilter);
    }

    vo1 = vec_sra(vo1, altivec_vectorShiftInt19);
    vo2 = vec_sra(vo2, altivec_vectorShiftInt19);
    vo3 = vec_sra(vo3, altivec_vectorShiftInt19);
    vo4 = vec_sra(vo4, altivec_vectorShiftInt19);
    vs1 = vec_packsu(vo1, vo2);
    vs2 = vec_packsu(vo3, vo4);
    vf  = vec_packsu(vs1, vs2);
    VEC_ST(vf, 0, dest);
}
Example #7
static inline void
altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW)
{
    register int i;
    vector unsigned int altivec_vectorShiftInt19 =
        vec_add(vec_splat_u32(10), vec_splat_u32(9));
    if ((uintptr_t)dest % 16) {
        /* badly aligned store, we force store alignment */
        /* and will handle load misalignment on val w/ vec_perm */
        vector unsigned char perm1;
        vector signed int v1;
        for (i = 0 ; (i < dstW) &&
            (((uintptr_t)dest + i) % 16) ; i++) {
                int t = val[i] >> 19;
                dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
        }
        perm1 = vec_lvsl(i << 2, val);
        v1 = vec_ld(i << 2, val);
        for ( ; i < (dstW - 15); i+=16) {
            int offset = i << 2;
            vector signed int v2 = vec_ld(offset + 16, val);
            vector signed int v3 = vec_ld(offset + 32, val);
            vector signed int v4 = vec_ld(offset + 48, val);
            vector signed int v5 = vec_ld(offset + 64, val);
            vector signed int v12 = vec_perm(v1, v2, perm1);
            vector signed int v23 = vec_perm(v2, v3, perm1);
            vector signed int v34 = vec_perm(v3, v4, perm1);
            vector signed int v45 = vec_perm(v4, v5, perm1);

            vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
            vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
            vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
            vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
            vector unsigned short vs1 = vec_packsu(vA, vB);
            vector unsigned short vs2 = vec_packsu(vC, vD);
            vector unsigned char vf = vec_packsu(vs1, vs2);
            vec_st(vf, i, dest);
            v1 = v5;
        }
    } else { // dest is properly aligned, great
        for (i = 0; i < dstW - 15; i += 16) {
            int offset = i << 2;
            vector signed int v1 = vec_ld(offset, val);
            vector signed int v2 = vec_ld(offset + 16, val);
            vector signed int v3 = vec_ld(offset + 32, val);
            vector signed int v4 = vec_ld(offset + 48, val);
            vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
            vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
            vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
            vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
            vector unsigned short vs1 = vec_packsu(v5, v6);
            vector unsigned short vs2 = vec_packsu(v7, v8);
            vector unsigned char vf = vec_packsu(vs1, vs2);
            vec_st(vf, i, dest);
        }
    }
    /* scalar tail for the remaining (dstW % 16) pixels */
    for (; i < dstW; i++) {
        int t = val[i] >> 19;
        dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
    }
}
Example #8
static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}
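Biasing the DC coefficient by 32 before the two 1-D passes folds the rounding offset for the final arithmetic shift into the transform input:

/* out = (residual + 32) >> 6, i.e. residual / 64 rounded to nearest */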
Example #9
void x264_add8x8_idct_dc_altivec( uint8_t *p_dst, int16_t dct[4] )
{
    vec_s16_t dcv;
    vec_s16_t v32 = vec_sl( vec_splat_s16( 8 ), vec_splat_u16( 2 ) );
    vec_u16_t v6 = vec_splat_u16( 6 );
    vec_s16_t dctv = vec_vsx_ld( 0, dct );

    dctv = vec_sra( vec_add( dctv, v32 ), v6 );
    dcv = (vec_s16_t)vec_mergeh( (vec_s32_t)vec_splat( dctv, 0 ), (vec_s32_t)vec_splat( dctv, 1 ) );
    dcv = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv, (vec_s32_t)dcv );
    idct8_dc_altivec( &p_dst[0], dcv );
    dcv = (vec_s16_t)vec_mergeh( (vec_s32_t)vec_splat( dctv, 2 ), (vec_s32_t)vec_splat( dctv, 3 ) );
    dcv = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv, (vec_s32_t)dcv );
    idct8_dc_altivec( &p_dst[4*FDEC_STRIDE+0], dcv );
}
Example #10
__attribute__((noinline)) void foo ()
{
  int i;
  vector long long va, vb, vc, vd, tmp;
  volatile unsigned long long three = 3;
  vector unsigned long long threes = vec_splats (three);
  for (i = 0; i < N; i+=2) {
    vb = vec_vsx_ld (0, (vector long long *)&cb[i]);
    vc = vec_vsx_ld (0, (vector long long *)&cc[i]);
    vd = vec_vsx_ld (0, (vector long long *)&cd[i]);
    tmp = vec_add (vb, vc);
    tmp = vec_sub (tmp, vd);
    tmp = vec_sra (tmp, threes);
    x = vec_extract (tmp, 0);
    vec_vsx_st (tmp, 0, (vector long long *)&ca[i]);
  }
}
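The snippet depends on globals from its surrounding test harness, which are not shown. A minimal, hypothetical set of declarations that lets it compile (names and sizes assumed, not from the original):

/* hypothetical harness globals -- assumed, not part of the original snippet */
#define N 64
long long ca[N], cb[N], cc[N], cd[N];
long long x;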
Example #11
static void predict_8x8c_p_altivec( uint8_t *src )
{
    int H = 0, V = 0;

    for( int i = 0; i < 4; i++ )
    {
        H += ( i + 1 ) * ( src[4+i - FDEC_STRIDE] - src[2 - i -FDEC_STRIDE] );
        V += ( i + 1 ) * ( src[-1 +(i+4)*FDEC_STRIDE] - src[-1+(2-i)*FDEC_STRIDE] );
    }

    int a = 16 * ( src[-1+7*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
    int b = ( 17 * H + 16 ) >> 5;
    int c = ( 17 * V + 16 ) >> 5;
    int i00 = a -3*b -3*c + 16;

    vec_s16_u i00_u, b_u, c_u;
    i00_u.s[0] = i00;
    b_u.s[0]   = b;
    c_u.s[0]   = c;

    vec_u16_t val5_v = vec_splat_u16(5);
    vec_s16_t i00_v, b_v, c_v;
    i00_v = vec_splat(i00_u.v, 0);
    b_v = vec_splat(b_u.v, 0);
    c_v = vec_splat(c_u.v, 0);

    vec_s16_t induc_v  = (vec_s16_t) CV(0, 1, 2, 3, 4, 5, 6, 7);
    vec_s16_t add_i0_b_0v = vec_mladd(induc_v, b_v, i00_v);

    PREP_STORE8;

    for( int i = 0; i < 8; ++i )
    {
        vec_s16_t shift_0_v = vec_sra(add_i0_b_0v, val5_v);
        vec_u8_t com_sat_v = vec_packsu(shift_0_v, shift_0_v);
        VEC_STORE8(com_sat_v, &src[0]);
        src += FDEC_STRIDE;
        add_i0_b_0v = vec_adds(add_i0_b_0v, c_v);

    }
}
Example #12
/* this code assumes stride % 16 == 0 *and* tmp is properly aligned */
static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
  register int i;
  const vector signed int vzero = vec_splat_s32(0);
  const vector unsigned char permM2 = vec_lvsl(-2, src);
  const vector unsigned char permM1 = vec_lvsl(-1, src);
  const vector unsigned char permP0 = vec_lvsl(+0, src);
  const vector unsigned char permP1 = vec_lvsl(+1, src);
  const vector unsigned char permP2 = vec_lvsl(+2, src);
  const vector unsigned char permP3 = vec_lvsl(+3, src);
  const vector signed short v20ss = (const vector signed short)AVV(20);
  const vector unsigned int v10ui = vec_splat_u32(10);
  const vector signed short v5ss = vec_splat_s16(5);
  const vector signed short v1ss = vec_splat_s16(1);
  const vector signed int v512si = (const vector signed int)AVV(512);
  const vector unsigned int v16ui = (const vector unsigned int)AVV(16);

  register int align = ((((unsigned long)src) - 2) % 16);

  src -= (2 * srcStride);

  for (i = 0 ; i < 21 ; i ++) {
    vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
    vector unsigned char srcR1 = vec_ld(-2, src);
    vector unsigned char srcR2 = vec_ld(14, src);

    switch (align) {
    default: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = vec_perm(srcR1, srcR2, permP3);
    } break;
    case 11: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = srcR2;
    } break;
    case 12: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = srcR2;
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 13: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = srcR2;
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 14: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = srcR2;
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 15: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = srcR2;
      srcP0 = vec_perm(srcR2, srcR3, permP0);
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    }

    const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
    const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
    const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
    const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);

    const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
    const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
    const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
    const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);

    const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
    const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
    const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
    const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);

    const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
    const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
    const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
    const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
    const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
    const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
    
    const vector signed short pp1A = vec_mladd(sum1A, v20ss, sum3A);
    const vector signed short pp1B = vec_mladd(sum1B, v20ss, sum3B);

    const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
    const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);

    const vector signed short psumA = vec_sub(pp1A, pp2A);
    const vector signed short psumB = vec_sub(pp1B, pp2B);

    vec_st(psumA, 0, tmp);
    vec_st(psumB, 16, tmp);
    
    src += srcStride;
    tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
  }
  
  const vector unsigned char dstperm = vec_lvsr(0, dst);
  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
  const vector unsigned char mperm = (const vector unsigned char)
    AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
        0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
  
  int16_t *tmpbis = tmp - (tmpStride * 21);

  vector signed short tmpM2ssA = vec_ld(0, tmpbis);
  vector signed short tmpM2ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  vector signed short tmpM1ssA = vec_ld(0, tmpbis);
  vector signed short tmpM1ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  vector signed short tmpP0ssA = vec_ld(0, tmpbis);
  vector signed short tmpP0ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  vector signed short tmpP1ssA = vec_ld(0, tmpbis);
  vector signed short tmpP1ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  vector signed short tmpP2ssA = vec_ld(0, tmpbis);
  vector signed short tmpP2ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;

  for (i = 0 ; i < 16 ; i++) {
    const vector signed short tmpP3ssA = vec_ld(0, tmpbis);
    const vector signed short tmpP3ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;

    const vector signed short sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
    const vector signed short sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
    const vector signed short sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
    const vector signed short sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
    const vector signed short sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
    const vector signed short sum3B = vec_adds(tmpM2ssB, tmpP3ssB);

    tmpM2ssA = tmpM1ssA;
    tmpM2ssB = tmpM1ssB;
    tmpM1ssA = tmpP0ssA;
    tmpM1ssB = tmpP0ssB;
    tmpP0ssA = tmpP1ssA;
    tmpP0ssB = tmpP1ssB;
    tmpP1ssA = tmpP2ssA;
    tmpP1ssB = tmpP2ssB;
    tmpP2ssA = tmpP3ssA;
    tmpP2ssB = tmpP3ssB;

    const vector signed int pp1Ae = vec_mule(sum1A, v20ss);
    const vector signed int pp1Ao = vec_mulo(sum1A, v20ss);
    const vector signed int pp1Be = vec_mule(sum1B, v20ss);
    const vector signed int pp1Bo = vec_mulo(sum1B, v20ss);

    const vector signed int pp2Ae = vec_mule(sum2A, v5ss);
    const vector signed int pp2Ao = vec_mulo(sum2A, v5ss);
    const vector signed int pp2Be = vec_mule(sum2B, v5ss);
    const vector signed int pp2Bo = vec_mulo(sum2B, v5ss);

    const vector signed int pp3Ae = vec_sra((vector signed int)sum3A, v16ui);
    const vector signed int pp3Ao = vec_mulo(sum3A, v1ss);
    const vector signed int pp3Be = vec_sra((vector signed int)sum3B, v16ui);
    const vector signed int pp3Bo = vec_mulo(sum3B, v1ss);

    const vector signed int pp1cAe = vec_add(pp1Ae, v512si);
    const vector signed int pp1cAo = vec_add(pp1Ao, v512si);
    const vector signed int pp1cBe = vec_add(pp1Be, v512si);
    const vector signed int pp1cBo = vec_add(pp1Bo, v512si);

    const vector signed int pp32Ae = vec_sub(pp3Ae, pp2Ae);
    const vector signed int pp32Ao = vec_sub(pp3Ao, pp2Ao);
    const vector signed int pp32Be = vec_sub(pp3Be, pp2Be);
    const vector signed int pp32Bo = vec_sub(pp3Bo, pp2Bo);

    const vector signed int sumAe = vec_add(pp1cAe, pp32Ae);
    const vector signed int sumAo = vec_add(pp1cAo, pp32Ao);
    const vector signed int sumBe = vec_add(pp1cBe, pp32Be);
    const vector signed int sumBo = vec_add(pp1cBo, pp32Bo);
    
    const vector signed int ssumAe = vec_sra(sumAe, v10ui);
    const vector signed int ssumAo = vec_sra(sumAo, v10ui);
    const vector signed int ssumBe = vec_sra(sumBe, v10ui);
    const vector signed int ssumBo = vec_sra(sumBo, v10ui);

    const vector signed short ssume = vec_packs(ssumAe, ssumBe);
    const vector signed short ssumo = vec_packs(ssumAo, ssumBo);

    const vector unsigned char sumv = vec_packsu(ssume, ssumo);
    const vector unsigned char sum = vec_perm(sumv, sumv, mperm);

    const vector unsigned char dst1 = vec_ld(0, dst);
    const vector unsigned char dst2 = vec_ld(16, dst);
    const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));

    vector unsigned char fsum;
    OP_U8_ALTIVEC(fsum, sum, vdst);

    const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
    const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
    const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);

    vec_st(fdst1, 0, dst);
    vec_st(fdst2, 16, dst);

    dst += dstStride;
  }
  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
}
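The function applies H.264's six-tap filter twice: the first loop stores unrounded horizontal sums as 16-bit intermediates in tmp, and the second loop filters those vertically, normalizing with +512 and >>10. Per output pixel, the second pass amounts to (scalar sketch, t* being the six vertical taps read from tmp):

/* out = clip( (20*(t0 + t1) - 5*(tm1 + t2) + (tm2 + t3) + 512) >> 10 ) */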
Example #13
/* this code assumes stride % 16 == 0 */
static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
  
  register int i;

  const vector signed int vzero = vec_splat_s32(0);
  const vector unsigned char perm = vec_lvsl(0, src);
  const vector signed short v20ss = (const vector signed short)AVV(20);
  const vector unsigned short v5us = vec_splat_u16(5);
  const vector signed short v5ss = vec_splat_s16(5);
  const vector signed short v16ss = (const vector signed short)AVV(16);
  const vector unsigned char dstperm = vec_lvsr(0, dst);
  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
  
  uint8_t *srcbis = src - (srcStride * 2);

  const vector unsigned char srcM2a = vec_ld(0, srcbis);
  const vector unsigned char srcM2b = vec_ld(16, srcbis);
  const vector unsigned char srcM2 = vec_perm(srcM2a, srcM2b, perm);
  srcbis += srcStride;
  const vector unsigned char srcM1a = vec_ld(0, srcbis);
  const vector unsigned char srcM1b = vec_ld(16, srcbis);
  const vector unsigned char srcM1 = vec_perm(srcM1a, srcM1b, perm);
  srcbis += srcStride;
  const vector unsigned char srcP0a = vec_ld(0, srcbis);
  const vector unsigned char srcP0b = vec_ld(16, srcbis);
  const vector unsigned char srcP0 = vec_perm(srcP0a, srcP0b, perm);
  srcbis += srcStride;
  const vector unsigned char srcP1a = vec_ld(0, srcbis);
  const vector unsigned char srcP1b = vec_ld(16, srcbis);
  const vector unsigned char srcP1 = vec_perm(srcP1a, srcP1b, perm);
  srcbis += srcStride;
  const vector unsigned char srcP2a = vec_ld(0, srcbis);
  const vector unsigned char srcP2b = vec_ld(16, srcbis);
  const vector unsigned char srcP2 = vec_perm(srcP2a, srcP2b, perm);
  srcbis += srcStride;

  vector signed short srcM2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
  vector signed short srcM2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);
  vector signed short srcM1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
  vector signed short srcM1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
  vector signed short srcP0ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
  vector signed short srcP0ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
  vector signed short srcP1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
  vector signed short srcP1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
  vector signed short srcP2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
  vector signed short srcP2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);

  for (i = 0 ; i < 16 ; i++) {
    const vector unsigned char srcP3a = vec_ld(0, srcbis);
    const vector unsigned char srcP3b = vec_ld(16, srcbis);
    const vector unsigned char srcP3 = vec_perm(srcP3a, srcP3b, perm);
    const vector signed short srcP3ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
    const vector signed short srcP3ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
    srcbis += srcStride;

    const vector signed short sum1A = vec_adds(srcP0ssA, srcP1ssA);
    const vector signed short sum1B = vec_adds(srcP0ssB, srcP1ssB);
    const vector signed short sum2A = vec_adds(srcM1ssA, srcP2ssA);
    const vector signed short sum2B = vec_adds(srcM1ssB, srcP2ssB);
    const vector signed short sum3A = vec_adds(srcM2ssA, srcP3ssA);
    const vector signed short sum3B = vec_adds(srcM2ssB, srcP3ssB);

    srcM2ssA = srcM1ssA;
    srcM2ssB = srcM1ssB;
    srcM1ssA = srcP0ssA;
    srcM1ssB = srcP0ssB;
    srcP0ssA = srcP1ssA;
    srcP0ssB = srcP1ssB;
    srcP1ssA = srcP2ssA;
    srcP1ssB = srcP2ssB;
    srcP2ssA = srcP3ssA;
    srcP2ssB = srcP3ssB;
    
    const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
    const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);

    const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
    const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
    
    const vector signed short pp3A = vec_add(sum3A, pp1A);
    const vector signed short pp3B = vec_add(sum3B, pp1B);

    const vector signed short psumA = vec_sub(pp3A, pp2A);
    const vector signed short psumB = vec_sub(pp3B, pp2B);

    const vector signed short sumA = vec_sra(psumA, v5us);
    const vector signed short sumB = vec_sra(psumB, v5us);

    const vector unsigned char sum = vec_packsu(sumA, sumB);

    const vector unsigned char dst1 = vec_ld(0, dst);
    const vector unsigned char dst2 = vec_ld(16, dst);
    const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));

    vector unsigned char fsum;
    OP_U8_ALTIVEC(fsum, sum, vdst);

    const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
    const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
    const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);

    vec_st(fdst1, 0, dst);
    vec_st(fdst2, 16, dst);

    dst += dstStride;
  }
  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
}
Example #14
/* this code assumes that stride % 16 == 0 */
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
    signed int ABCD[4] __attribute__((aligned(16)));
    register int i;
    ABCD[0] = ((8 - x) * (8 - y));
    ABCD[1] = ((x) * (8 - y));
    ABCD[2] = ((8 - x) * (y));
    ABCD[3] = ((x) * (y));
    const vector signed int vABCD = vec_ld(0, ABCD);
    const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
    const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
    const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
    const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
    const vector signed int vzero = vec_splat_s32(0);
    const vector signed short v32ss = (const vector signed short)AVV(32);
    const vector unsigned short v6us = vec_splat_u16(6);

    vector unsigned char fperm;

    if (((unsigned long)dst) % 16 == 0) {
      fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                                        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
    } else {
      fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
    }

    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    
    vector unsigned char vsrcAuc;
    vector unsigned char vsrcBuc;
    vector unsigned char vsrcperm0;
    vector unsigned char vsrcperm1;
    vsrcAuc = vec_ld(0, src);
    if (loadSecond)
      vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
    
    vector unsigned char vsrc0uc;
    vector unsigned char vsrc1uc;
    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
      vsrc1uc = vsrcBuc;
    else
      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
    
    vector signed short vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc0uc);
    vector signed short vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
      for (i = 0 ; i < h ; i++) {
        vector unsigned char vsrcCuc;
        vsrcCuc = vec_ld(stride + 0, src);
        
        vector unsigned char vsrc2uc;
        vector unsigned char vsrc3uc;
        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
        
        vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
        vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
        
        vector signed short psum;
        
        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sra(psum, v6us);
        
        vector unsigned char vdst = vec_ld(0, dst);
        vector unsigned char ppsum = (vector unsigned char)vec_packsu(psum, psum);
        
        vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
        vector unsigned char fsum;
        
        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);
        
        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;
        
        dst += stride;
        src += stride;
      }
    } else {
      for (i = 0 ; i < h ; i++) {
        vector unsigned char vsrcCuc;
        vector unsigned char vsrcDuc;
        vsrcCuc = vec_ld(stride + 0, src);
        vsrcDuc = vec_ld(stride + 16, src);
        
        vector unsigned char vsrc2uc;
        vector unsigned char vsrc3uc;
        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
        if (reallyBadAlign)
          vsrc3uc = vsrcDuc;
        else
          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
        
        vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
        vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
        
        vector signed short psum;
      
        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sr(psum, v6us);
        
        vector unsigned char vdst = vec_ld(0, dst);
        vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum); 
        
        vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
        vector unsigned char fsum;
        
        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);
        
        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;
        
        dst += stride;
        src += stride;
      }
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
}
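The four weights implement H.264's bilinear chroma interpolation; per pixel, the loops compute (scalar sketch):

/* A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y
 * dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1] + 32) >> 6 */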
Example #15
/* this code assumes stride % 16 == 0 */
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
  register int i;
  
  const vector signed int vzero = vec_splat_s32(0);
  const vector unsigned char permM2 = vec_lvsl(-2, src);
  const vector unsigned char permM1 = vec_lvsl(-1, src);
  const vector unsigned char permP0 = vec_lvsl(+0, src);
  const vector unsigned char permP1 = vec_lvsl(+1, src);
  const vector unsigned char permP2 = vec_lvsl(+2, src);
  const vector unsigned char permP3 = vec_lvsl(+3, src);
  const vector signed short v20ss = (const vector signed short)AVV(20);
  const vector unsigned short v5us = vec_splat_u16(5);
  const vector signed short v5ss = vec_splat_s16(5);
  const vector signed short v16ss = (const vector signed short)AVV(16);
  const vector unsigned char dstperm = vec_lvsr(0, dst);
  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);

  register int align = ((((unsigned long)src) - 2) % 16);

  for (i = 0 ; i < 16 ; i ++) {
    vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
    vector unsigned char srcR1 = vec_ld(-2, src);
    vector unsigned char srcR2 = vec_ld(14, src);

    switch (align) {
    default: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = vec_perm(srcR1, srcR2, permP3);
    } break;
    case 11: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = srcR2;
    } break;
    case 12: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = srcR2;
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 13: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = srcR2;
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 14: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = srcR2;
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 15: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = srcR2;
      srcP0 = vec_perm(srcR2, srcR3, permP0);
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    }

    const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
    const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
    const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
    const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);

    const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
    const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
    const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
    const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);

    const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
    const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
    const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
    const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);

    const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
    const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
    const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
    const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
    const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
    const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
    
    const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
    const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);

    const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
    const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
    
    const vector signed short pp3A = vec_add(sum3A, pp1A);
    const vector signed short pp3B = vec_add(sum3B, pp1B);

    const vector signed short psumA = vec_sub(pp3A, pp2A);
    const vector signed short psumB = vec_sub(pp3B, pp2B);

    const vector signed short sumA = vec_sra(psumA, v5us);
    const vector signed short sumB = vec_sra(psumB, v5us);

    const vector unsigned char sum = vec_packsu(sumA, sumB);

    const vector unsigned char dst1 = vec_ld(0, dst);
    const vector unsigned char dst2 = vec_ld(16, dst);
    const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));

    vector unsigned char fsum;
    OP_U8_ALTIVEC(fsum, sum, vdst);

    const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
    const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
    const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);

    vec_st(fdst1, 0, dst);
    vec_st(fdst2, 16, dst);

    src += srcStride;
    dst += dstStride;
  }
  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
}
Example #16
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
    register int i;

    LOAD_ZERO;
    const vec_u8 permM2 = vec_lvsl(-2, src);
    const vec_u8 permM1 = vec_lvsl(-1, src);
    const vec_u8 permP0 = vec_lvsl(+0, src);
    const vec_u8 permP1 = vec_lvsl(+1, src);
    const vec_u8 permP2 = vec_lvsl(+2, src);
    const vec_u8 permP3 = vec_lvsl(+3, src);
    const vec_s16 v5ss = vec_splat_s16(5);
    const vec_u16 v5us = vec_splat_u16(5);
    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

    vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;

    register int align = ((((unsigned long)src) - 2) % 16);

    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
              srcP2A, srcP2B, srcP3A, srcP3B,
              srcM1A, srcM1B, srcM2A, srcM2B,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
              pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
              psumA, psumB, sumA, sumB;

    vec_u8 sum, fsum;

    for (i = 0 ; i < 16 ; i ++) {
        vec_u8 srcR1 = vec_ld(-2, src);
        vec_u8 srcR2 = vec_ld(14, src);

        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
        srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
        srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
        srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);

        srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
        srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
        srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
        srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);

        srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
        srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
        srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
        srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);

        sum1A = vec_adds(srcP0A, srcP1A);
        sum1B = vec_adds(srcP0B, srcP1B);
        sum2A = vec_adds(srcM1A, srcP2A);
        sum2B = vec_adds(srcM1B, srcP2B);
        sum3A = vec_adds(srcM2A, srcP3A);
        sum3B = vec_adds(srcM2B, srcP3B);

        pp1A = vec_mladd(sum1A, v20ss, v16ss);
        pp1B = vec_mladd(sum1B, v20ss, v16ss);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        pp3A = vec_add(sum3A, pp1A);
        pp3B = vec_add(sum3B, pp1B);

        psumA = vec_sub(pp3A, pp2A);
        psumB = vec_sub(pp3B, pp2B);

        sumA = vec_sra(psumA, v5us);
        sumB = vec_sra(psumB, v5us);

        sum = vec_packsu(sumA, sumB);

        ASSERT_ALIGNED(dst);

        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));

        vec_st(fsum, 0, dst);

        src += srcStride;
        dst += dstStride;
    }
}
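This and the vertical variant below both evaluate H.264's six-tap half-pel filter (1, -5, 20, 20, -5, 1). Per output pixel (scalar sketch, m2..p3 being the six taps):

/* out = clip( (20*(p0 + p1) - 5*(m1 + p2) + (m2 + p3) + 16) >> 5 ) */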
Example #17
static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
    register int i;

    LOAD_ZERO;
    const vec_u8 perm = vec_lvsl(0, src);
    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_u16 v5us = vec_splat_u16(5);
    const vec_s16 v5ss = vec_splat_s16(5);
    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

    uint8_t *srcbis = src - (srcStride * 2);

    const vec_u8 srcM2a = vec_ld(0, srcbis);
    const vec_u8 srcM2b = vec_ld(16, srcbis);
    const vec_u8 srcM2 = vec_perm(srcM2a, srcM2b, perm);
    //srcbis += srcStride;
    const vec_u8 srcM1a = vec_ld(0, srcbis += srcStride);
    const vec_u8 srcM1b = vec_ld(16, srcbis);
    const vec_u8 srcM1 = vec_perm(srcM1a, srcM1b, perm);
    //srcbis += srcStride;
    const vec_u8 srcP0a = vec_ld(0, srcbis += srcStride);
    const vec_u8 srcP0b = vec_ld(16, srcbis);
    const vec_u8 srcP0 = vec_perm(srcP0a, srcP0b, perm);
    //srcbis += srcStride;
    const vec_u8 srcP1a = vec_ld(0, srcbis += srcStride);
    const vec_u8 srcP1b = vec_ld(16, srcbis);
    const vec_u8 srcP1 = vec_perm(srcP1a, srcP1b, perm);
    //srcbis += srcStride;
    const vec_u8 srcP2a = vec_ld(0, srcbis += srcStride);
    const vec_u8 srcP2b = vec_ld(16, srcbis);
    const vec_u8 srcP2 = vec_perm(srcP2a, srcP2b, perm);
    //srcbis += srcStride;

    vec_s16 srcM2ssA = (vec_s16) vec_mergeh(zero_u8v, srcM2);
    vec_s16 srcM2ssB = (vec_s16) vec_mergel(zero_u8v, srcM2);
    vec_s16 srcM1ssA = (vec_s16) vec_mergeh(zero_u8v, srcM1);
    vec_s16 srcM1ssB = (vec_s16) vec_mergel(zero_u8v, srcM1);
    vec_s16 srcP0ssA = (vec_s16) vec_mergeh(zero_u8v, srcP0);
    vec_s16 srcP0ssB = (vec_s16) vec_mergel(zero_u8v, srcP0);
    vec_s16 srcP1ssA = (vec_s16) vec_mergeh(zero_u8v, srcP1);
    vec_s16 srcP1ssB = (vec_s16) vec_mergel(zero_u8v, srcP1);
    vec_s16 srcP2ssA = (vec_s16) vec_mergeh(zero_u8v, srcP2);
    vec_s16 srcP2ssB = (vec_s16) vec_mergel(zero_u8v, srcP2);

    vec_s16 pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
              psumA, psumB, sumA, sumB,
              srcP3ssA, srcP3ssB,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;

    vec_u8 sum, fsum, srcP3a, srcP3b, srcP3;

    for (i = 0 ; i < 16 ; i++) {
        srcP3a = vec_ld(0, srcbis += srcStride);
        srcP3b = vec_ld(16, srcbis);
        srcP3 = vec_perm(srcP3a, srcP3b, perm);
        srcP3ssA = (vec_s16) vec_mergeh(zero_u8v, srcP3);
        srcP3ssB = (vec_s16) vec_mergel(zero_u8v, srcP3);
        //srcbis += srcStride;

        sum1A = vec_adds(srcP0ssA, srcP1ssA);
        sum1B = vec_adds(srcP0ssB, srcP1ssB);
        sum2A = vec_adds(srcM1ssA, srcP2ssA);
        sum2B = vec_adds(srcM1ssB, srcP2ssB);
        sum3A = vec_adds(srcM2ssA, srcP3ssA);
        sum3B = vec_adds(srcM2ssB, srcP3ssB);

        srcM2ssA = srcM1ssA;
        srcM2ssB = srcM1ssB;
        srcM1ssA = srcP0ssA;
        srcM1ssB = srcP0ssB;
        srcP0ssA = srcP1ssA;
        srcP0ssB = srcP1ssB;
        srcP1ssA = srcP2ssA;
        srcP1ssB = srcP2ssB;
        srcP2ssA = srcP3ssA;
        srcP2ssB = srcP3ssB;

        pp1A = vec_mladd(sum1A, v20ss, v16ss);
        pp1B = vec_mladd(sum1B, v20ss, v16ss);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        pp3A = vec_add(sum3A, pp1A);
        pp3B = vec_add(sum3B, pp1B);

        psumA = vec_sub(pp3A, pp2A);
        psumB = vec_sub(pp3B, pp2B);

        sumA = vec_sra(psumA, v5us);
        sumB = vec_sra(psumB, v5us);

        sum = vec_packsu(sumA, sumB);

        ASSERT_ALIGNED(dst);

        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));

        vec_st(fsum, 0, dst);

        dst += dstStride;
    }
}
Example #18
int bsumsq_altivec(BSUMSQ_PDECL)
{
    int i;
    uint8_t *pfy, *pby;
    vector unsigned char l0, l1, lR;
    vector unsigned char permF0, permF1, permB0, permB1;
    vector unsigned char vf, vfa, vfb, vfc;
    vector unsigned char vb, vba, vbb, vbc;
    vector unsigned short tH, tL, fH, fL, bH, bL;
    vector unsigned char zero;
    vector unsigned short one, two;
    vector unsigned char max, min, dif;
    vector unsigned int sum;
    union {
	vector signed int v;
	struct {
	    signed int pad[3];
	    signed int sum;
	} s;
    } vo;


 
#ifdef ALTIVEC_VERIFY
    if (hxf != 0 && hxf != 1)
	mjpeg_error_exit1("bsumsq: hxf != [0|1], (hxf=%d)", hxf);

    if (hyf != 0 && hyf != 1)
	mjpeg_error_exit1("bsumsq: hyf != [0|1], (hyf=%d)", hyf);

    if (hxb != 0 && hxb != 1)
	mjpeg_error_exit1("bsumsq: hxb != [0|1], (hxb=%d)", hxb);

    if (hyb != 0 && hyb != 1)
	mjpeg_error_exit1("bsumsq: hyb != [0|1], (hyb=%d)", hyb);

    if (NOT_VECTOR_ALIGNED(p2))
	mjpeg_error_exit1("bsumsq: p2 %% 16 != 0, (0x%X)", p2);

    if (NOT_VECTOR_ALIGNED(rowstride))
	mjpeg_error_exit1("bsumsq: rowstride %% 16 != 0, (%d)", rowstride);

    if (h != 8 && h != 16)
	mjpeg_error_exit1("bsumsq: h != [8|16], (%d)", h);
#endif

    AMBER_START;


    /* start loading first set  */
    vfb = vec_ld(0, pf);	 /* use vfb & vfc as temp for vf & vfa */
    vfc = vec_ld(16, pf);

    pfy = pf + (rowstride * hyf);
    l0 = vec_ld(0, pfy);
    l1 = vec_ld(16, pfy);


    pby = pb + (rowstride * hyb);


    zero  = vec_splat_u8(0);
    one = vec_splat_u16(1);
    two = vec_splat_u16(2);

    sum = vec_splat_u32(0);


    permF0 = vec_lvsl(0, pf);
    permF1 = vec_lvsl(hxf, (unsigned char*)0);
    permF1 = vec_splat(permF1, 0);
    permF1 = vec_add(permF0, permF1);
    
    permB0 = vec_lvsl(0, pb);
    permB1 = vec_lvsl(hxb, (unsigned char*)0);
    permB1 = vec_splat(permB1, 0);
    permB1 = vec_add(permB0, permB1);

    
    i = h - 1;
    do { /* while (--i) */

	vf = vec_perm(vfb, vfc, permF0);
	vfa = vec_perm(vfb, vfc, permF1);
	vfb = vec_perm(l0, l1, permF0);
	vfc = vec_perm(l0, l1, permF1);

	vbb = vec_ld(0, pb);	 /* use vbb & vbc as temp for vb & vba */
	vbc = vec_ld(16, pb);
	l0 = vec_ld(0, pby);
	l1 = vec_ld(16, pby);

	pb += rowstride;
	pby += rowstride;

	/* (unsigned short[]) pf[0-7] */    
	fH = vu16(vec_mergeh(zero, vf));
			
	/* (unsigned short[]) pf[8-15] */   
	fL = vu16(vec_mergel(zero, vf));
			
	/* (unsigned short[]) pfa[0-7] */    
	tH = vu16(vec_mergeh(zero, vfa));
			
	/* (unsigned short[]) pfa[8-15] */   
	tL = vu16(vec_mergel(zero, vfa));

	/* pf[i] + pfa[i] */                                                 
	fH = vec_add(fH, tH);                                               
	fL = vec_add(fL, tL);                                               

	/* (unsigned short[]) pfb[0-7] */  
	tH = vu16(vec_mergeh(zero, vfb));
			
	/* (unsigned short[]) pfb[8-15] */ 
	tL = vu16(vec_mergel(zero, vfb));

	/* (pf[i]+pfa[i]) + pfb[i] */                                       
	fH = vec_add(fH, tH);                                                
	fL = vec_add(fL, tL);                                                
			
	/* (unsigned short[]) pfc[0-7] */  
	tH = vu16(vec_mergeh(zero, vfc));
			
	/* (unsigned short[]) pfc[8-15] */ 
	tL = vu16(vec_mergel(zero, vfc));

	/* (pf[i]+pfa[i]+pfb[i]) + pfc[i] */
	fH = vec_add(fH, tH);                                                
	fL = vec_add(fL, tL);                                                

							
	/* (pf[i]+pfa[i]+pfb[i]+pfc[i]) + 2 */
	fH = vec_add(fH, two);                                                
	fL = vec_add(fL, two);                                                
							
	/* (pf[i]+pfa[i]+pfb[i]+pfc[i]+2) >> 2 */
	fH = vec_sra(fH, two);                                                
	fL = vec_sra(fL, two);                                                


	lR = vec_ld(0, p2);
	p2 += rowstride;

	vb = vec_perm(vbb, vbc, permB0);
	vba = vec_perm(vbb, vbc, permB1);
	vbb = vec_perm(l0, l1, permB0);
	vbc = vec_perm(l0, l1, permB1);


	pf += rowstride;
	vfb = vec_ld(0, pf);	 /* use vfb & vfc as temp for vf & vfa */
	vfc = vec_ld(16, pf);
	pfy += rowstride;
	l0 = vec_ld(0, pfy);
	l1 = vec_ld(16, pfy);

	/* (unsigned short[]) pb[0-7] */    
	bH = vu16(vec_mergeh(zero, vb));

	/* (unsigned short[]) pb[8-15] */   
	bL = vu16(vec_mergel(zero, vb));

	/* (unsigned short[]) pba[0-7] */    
	tH = vu16(vec_mergeh(zero, vba));

	/* (unsigned short[]) pba[8-15] */   
	tL = vu16(vec_mergel(zero, vba));

	/* pb[i] + pba[i] */                                                 
	bH = vec_add(bH, tH);                                               
	bL = vec_add(bL, tL);                                               

	/* (unsigned short[]) pbb[0-7] */  
	tH = vu16(vec_mergeh(zero, vbb));

	/* (unsigned short[]) pbb[8-15] */ 
	tL = vu16(vec_mergel(zero, vbb));

	/* (pb[i]+pba[i]) + pbb[i] */                                       
	bH = vec_add(bH, tH);                                                
	bL = vec_add(bL, tL);                                                
			
	/* (unsigned short[]) pbc[0-7] */  
	tH = vu16(vec_mergeh(zero, vbc));

	/* (unsigned short[]) pbc[8-15] */ 
	tL = vu16(vec_mergel(zero, vbc));

	/* (pb[i]+pba[i]+pbb[i]) + pbc[i] */
	bH = vec_add(bH, tH);                                                
	bL = vec_add(bL, tL);                                                

							
	/* (pb[i]+pba[i]+pbb[i]+pbc[i]) + 2 */
	bH = vec_add(bH, two);                                                
	bL = vec_add(bL, two);                                                

	/* (pb[i]+pba[i]+pbb[i]+pbc[i]+2) >> 2 */
	bH = vec_sra(bH, two);                                                
	bL = vec_sra(bL, two);                                                

	/* ((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2) +
	 * ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)
         */
	tH = vec_add(fH, bH);                                                
	tL = vec_add(fL, bL);                                                

	/* (((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
	 *  ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)) + 1
         */
	tH = vec_add(tH, one);                                                
	tL = vec_add(tL, one);                                                

	/* (((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
	 *  ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)+1) >> 1
         */
	tH = vec_sra(tH, one);                                                
	tL = vec_sra(tL, one);                                                

	/* taking the absolute difference keeps the data in unsigned bytes
	 * (16 lanes instead of 8), since no bit is lost to the sign.
	 *
	 * d = abs( ((((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
	 *            ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)+1)>>1) - p2[i] )
	 */
	tH = vu16(vec_packsu(tH, tL));
	min = vec_min(vu8(tH), lR);                                           
	max = vec_max(vu8(tH), lR);                                           
	dif = vec_sub(max, min);                                              

	/* sum += (d * d) */                                                   
	sum = vec_msum(dif, dif, sum);                                        

    } while (--i);

    vf = vec_perm(vfb, vfc, permF0);
    vfa = vec_perm(vfb, vfc, permF1);
    vfb = vec_perm(l0, l1, permF0);
    vfc = vec_perm(l0, l1, permF1);

    vbb = vec_ld(0, pb);	 /* use vbb & vbc as temp for vb & vba */
    vbc = vec_ld(16, pb);
    l0 = vec_ld(0, pby);
    l1 = vec_ld(16, pby);

    /* (unsigned short[]) pf[0-7] */    
    fH = vu16(vec_mergeh(zero, vf));
			
    /* (unsigned short[]) pf[8-15] */   
    fL = vu16(vec_mergel(zero, vf));
			
    /* (unsigned short[]) pfa[0-7] */    
    tH = vu16(vec_mergeh(zero, vfa));
			
    /* (unsigned short[]) pfa[8-15] */   
    tL = vu16(vec_mergel(zero, vfa));

    /* pf[i] + pfa[i] */                                                 
    fH = vec_add(fH, tH);                                               
    fL = vec_add(fL, tL);                                               

    /* (unsigned short[]) pfb[0-7] */  
    tH = vu16(vec_mergeh(zero, vfb));

    /* (unsigned short[]) pfb[8-15] */ 
    tL = vu16(vec_mergel(zero, vfb));

    /* (pf[i]+pfa[i]) + pfb[i] */                                       
    fH = vec_add(fH, tH);                                                
    fL = vec_add(fL, tL);                                                

    /* (unsigned short[]) pfc[0-7] */  
    tH = vu16(vec_mergeh(zero, vfc));
			
    /* (unsigned short[]) pfc[8-15] */ 
    tL = vu16(vec_mergel(zero, vfc));

    /* (pf[i]+pfa[i]+pfb[i]) + pfc[i] */
    fH = vec_add(fH, tH);                                                
    fL = vec_add(fL, tL);                                                

    /* (pf[i]+pfa[i]+pfb[i]+pfc[i]) + 2 */
    fH = vec_add(fH, two);
    fL = vec_add(fL, two);

    /* (pf[i]+pfa[i]+pfb[i]+pfc[i]+2) >> 2 */
    fH = vec_sra(fH, two);
    fL = vec_sra(fL, two);

    lR = vec_ld(0, p2);

    vb = vec_perm(vbb, vbc, permB0);
    vba = vec_perm(vbb, vbc, permB1);
    vbb = vec_perm(l0, l1, permB0);
    vbc = vec_perm(l0, l1, permB1);

    /* (unsigned short[]) pb[0-7] */    
    bH = vu16(vec_mergeh(zero, vb));
			
    /* (unsigned short[]) pb[8-15] */   
    bL = vu16(vec_mergel(zero, vb));

    /* (unsigned short[]) pba[0-7] */
    tH = vu16(vec_mergeh(zero, vba));

    /* (unsigned short[]) pba[8-15] */   
    tL = vu16(vec_mergel(zero, vba));

    /* pb[i] + pba[i] */                                                 
    bH = vec_add(bH, tH);                                               
    bL = vec_add(bL, tL);                                               

    /* (unsigned short[]) pbb[0-7] */  
    tH = vu16(vec_mergeh(zero, vbb));

    /* (unsigned short[]) pbb[8-15] */ 
    tL = vu16(vec_mergel(zero, vbb));

    /* (pb[i]+pba[i]) + pbb[i] */                                       
    bH = vec_add(bH, tH);                                                
    bL = vec_add(bL, tL);                                                
			
    /* (unsigned short[]) pbc[0-7] */  
    tH = vu16(vec_mergeh(zero, vbc));
			
    /* (unsigned short[]) pbc[8-15] */ 
    tL = vu16(vec_mergel(zero, vbc));

    /* (pb[i]+pba[i]+pbb[i]) + pbc[i] */
    bH = vec_add(bH, tH);                                                
    bL = vec_add(bL, tL);                                                

							
    /* (pb[i]+pba[i]+pbb[i]+pbc[i]) + 2 */
    bH = vec_add(bH, two);                                                
    bL = vec_add(bL, two);                                                
							
    /* (pb[i]+pba[i]+pbb[i]+pbc[i]+2) >> 2 */
    bH = vec_sra(bH, two);                                                
    bL = vec_sra(bL, two);                                                

    /* ((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2) +
     * ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)
     */
    tH = vec_add(fH, bH);                                                
    tL = vec_add(fL, bL);                                                

    /* (((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
     *  ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)) + 1
     */
    tH = vec_add(tH, one);                                                
    tL = vec_add(tL, one);

    /* (((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
     *  ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)+1) >> 1
     */
    tH = vec_sra(tH, one);                                                
    tL = vec_sra(tL, one);                                                

    /* taking the absolute difference keeps the data in unsigned bytes
     * (16 lanes instead of 8), since no bit is lost to the sign.
     *
     * d = abs( ((((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
     *            ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)+1)>>1) - p2[i] )
     */
    tH = vu16(vec_packsu(tH, tL));
    min = vec_min(vu8(tH), lR);                                           
    max = vec_max(vu8(tH), lR);                                           
    dif = vec_sub(max, min);                                              

    /* sum += (d * d) */                                                   
    sum = vec_msum(dif, dif, sum);                                        

    /* sum all parts of difference into one 32 bit quantity */
    vo.v = vec_sums(vs32(sum), vs32(zero));

    AMBER_STOP;
    return vo.s.sum;
}
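The tail above repeats the loop body once because the loads for the next row are issued inside the do-while; the reduction itself rests on vec_msum (sixteen byte products folded into four 32-bit partial sums) and vec_sums (folded into one total). A minimal sketch of the same sum-of-squared-differences idiom over a single 16-byte row, assuming both pointers are 16-byte aligned (the helper name is hypothetical):

#include <altivec.h>

static int ssd16_row(const unsigned char *a, const unsigned char *b)
{
    union { vector signed int v; signed int s[4]; } out;
    vector unsigned char va  = vec_ld(0, a);
    vector unsigned char vb  = vec_ld(0, b);
    /* |a - b| without sign handling: max minus min */
    vector unsigned char dif = vec_sub(vec_max(va, vb), vec_min(va, vb));
    /* four partial sums of dif[i] * dif[i] */
    vector unsigned int  sum = vec_msum(dif, dif, vec_splat_u32(0));
    /* fold the partial sums; vec_sums leaves the total in the last element */
    out.v = vec_sums((vector signed int) sum, vec_splat_s32(0));
    return out.s[3];
}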
Beispiel #19
0
static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t *dst,
                                                 const uint8_t *src,
                                                 int dstStride, int srcStride)
{
    register int i;

    LOAD_ZERO;
    vec_u8 perm;
#if HAVE_BIGENDIAN
    perm = vec_lvsl(0, src);
#endif
    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_u16 v5us = vec_splat_u16(5);
    const vec_s16 v5ss = vec_splat_s16(5);
    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

    const uint8_t *srcbis = src - (srcStride * 2);

    const vec_u8 srcM2 = load_with_perm_vec(0, srcbis, perm);
    srcbis += srcStride;
    const vec_u8 srcM1 = load_with_perm_vec(0, srcbis, perm);
    srcbis += srcStride;
    const vec_u8 srcP0 = load_with_perm_vec(0, srcbis, perm);
    srcbis += srcStride;
    const vec_u8 srcP1 = load_with_perm_vec(0, srcbis, perm);
    srcbis += srcStride;
    const vec_u8 srcP2 = load_with_perm_vec(0, srcbis, perm);
    srcbis += srcStride;

    vec_s16 srcM2ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcM2);
    vec_s16 srcM2ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcM2);
    vec_s16 srcM1ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcM1);
    vec_s16 srcM1ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcM1);
    vec_s16 srcP0ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP0);
    vec_s16 srcP0ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP0);
    vec_s16 srcP1ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP1);
    vec_s16 srcP1ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP1);
    vec_s16 srcP2ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP2);
    vec_s16 srcP2ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP2);

    vec_s16 pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
              psumA, psumB, sumA, sumB,
              srcP3ssA, srcP3ssB,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;

    vec_u8 sum, fsum, srcP3;

    for (i = 0 ; i < 16 ; i++) {
        srcP3 = load_with_perm_vec(0, srcbis, perm);
        srcbis += srcStride;

        srcP3ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP3);
        srcP3ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP3);

        sum1A = vec_adds(srcP0ssA, srcP1ssA);
        sum1B = vec_adds(srcP0ssB, srcP1ssB);
        sum2A = vec_adds(srcM1ssA, srcP2ssA);
        sum2B = vec_adds(srcM1ssB, srcP2ssB);
        sum3A = vec_adds(srcM2ssA, srcP3ssA);
        sum3B = vec_adds(srcM2ssB, srcP3ssB);

        srcM2ssA = srcM1ssA;
        srcM2ssB = srcM1ssB;
        srcM1ssA = srcP0ssA;
        srcM1ssB = srcP0ssB;
        srcP0ssA = srcP1ssA;
        srcP0ssB = srcP1ssB;
        srcP1ssA = srcP2ssA;
        srcP1ssB = srcP2ssB;
        srcP2ssA = srcP3ssA;
        srcP2ssB = srcP3ssB;

        pp1A = vec_mladd(sum1A, v20ss, v16ss);
        pp1B = vec_mladd(sum1B, v20ss, v16ss);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        pp3A = vec_add(sum3A, pp1A);
        pp3B = vec_add(sum3B, pp1B);

        psumA = vec_sub(pp3A, pp2A);
        psumB = vec_sub(pp3B, pp2B);

        sumA = vec_sra(psumA, v5us);
        sumB = vec_sra(psumB, v5us);

        sum = vec_packsu(sumA, sumB);

        ASSERT_ALIGNED(dst);

        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));

        vec_st(fsum, 0, dst);

        dst += dstStride;
    }
}
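The constants here are built by splat-and-shift, v20ss = 5 << 2 and v16ss = 1 << 4, and the adds/mladd/sub sequence evaluates the H.264 six-tap half-pel filter per column. A scalar sketch of one output sample (hypothetical helper; same taps, rounding, and clamp as the vector path):

static unsigned char lowpass6(const unsigned char *src, int stride)
{
    /* taps (1, -5, 20, 20, -5, 1), round by 16, shift by 5, clamp to 0..255 */
    int sum = (src[0]          + src[stride])     * 20
            - (src[-stride]    + src[2 * stride]) * 5
            + src[-2 * stride] + src[3 * stride]
            + 16;
    sum >>= 5;
    if (sum < 0)   sum = 0;
    if (sum > 255) sum = 255;
    return (unsigned char) sum;
}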
Beispiel #20
0
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t *dst,
                                                 const uint8_t *src,
                                                 int dstStride, int srcStride)
{
    register int i;

    LOAD_ZERO;
    vec_u8 permM2, permM1, permP0, permP1, permP2, permP3;
    const vec_s16 v5ss = vec_splat_s16(5);
    const vec_u16 v5us = vec_splat_u16(5);
    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

    vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;

    register int align = ((((unsigned long)src) - 2) % 16);

    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
              srcP2A, srcP2B, srcP3A, srcP3B,
              srcM1A, srcM1B, srcM2A, srcM2B,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
              pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
              psumA, psumB, sumA, sumB;

    vec_u8 sum, fsum;

#if HAVE_BIGENDIAN
    permM2 = vec_lvsl(-2, src);
    permM1 = vec_lvsl(-1, src);
    permP0 = vec_lvsl(+0, src);
    permP1 = vec_lvsl(+1, src);
    permP2 = vec_lvsl(+2, src);
    permP3 = vec_lvsl(+3, src);
#endif /* HAVE_BIGENDIAN */

    for (i = 0 ; i < 16 ; i ++) {
        load_alignment(src, align, permM2, permM1, permP0, permP1, permP2, permP3);

        srcP0A = (vec_s16) VEC_MERGEH(zero_u8v, srcP0);
        srcP0B = (vec_s16) VEC_MERGEL(zero_u8v, srcP0);
        srcP1A = (vec_s16) VEC_MERGEH(zero_u8v, srcP1);
        srcP1B = (vec_s16) VEC_MERGEL(zero_u8v, srcP1);

        srcP2A = (vec_s16) VEC_MERGEH(zero_u8v, srcP2);
        srcP2B = (vec_s16) VEC_MERGEL(zero_u8v, srcP2);
        srcP3A = (vec_s16) VEC_MERGEH(zero_u8v, srcP3);
        srcP3B = (vec_s16) VEC_MERGEL(zero_u8v, srcP3);

        srcM1A = (vec_s16) VEC_MERGEH(zero_u8v, srcM1);
        srcM1B = (vec_s16) VEC_MERGEL(zero_u8v, srcM1);
        srcM2A = (vec_s16) VEC_MERGEH(zero_u8v, srcM2);
        srcM2B = (vec_s16) VEC_MERGEL(zero_u8v, srcM2);

        sum1A = vec_adds(srcP0A, srcP1A);
        sum1B = vec_adds(srcP0B, srcP1B);
        sum2A = vec_adds(srcM1A, srcP2A);
        sum2B = vec_adds(srcM1B, srcP2B);
        sum3A = vec_adds(srcM2A, srcP3A);
        sum3B = vec_adds(srcM2B, srcP3B);

        pp1A = vec_mladd(sum1A, v20ss, v16ss);
        pp1B = vec_mladd(sum1B, v20ss, v16ss);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        pp3A = vec_add(sum3A, pp1A);
        pp3B = vec_add(sum3B, pp1B);

        psumA = vec_sub(pp3A, pp2A);
        psumB = vec_sub(pp3B, pp2B);

        sumA = vec_sra(psumA, v5us);
        sumB = vec_sra(psumB, v5us);

        sum = vec_packsu(sumA, sumB);

        ASSERT_ALIGNED(dst);

        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));

        vec_st(fsum, 0, dst);

        src += srcStride;
        dst += dstStride;
    }
}
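The load_alignment step (and the vec_lvsl permute vectors prepared under HAVE_BIGENDIAN) compensate for vec_ld silently clearing the low four address bits. The underlying unaligned-load idiom, as a standalone sketch with a hypothetical helper name:

/* Fetch the two aligned vectors that span the 16 bytes at p,
 * then stitch the wanted bytes together with vec_perm. */
static vector unsigned char load_unaligned(const unsigned char *p)
{
    vector unsigned char hi = vec_ld(0,  p);
    vector unsigned char lo = vec_ld(15, p);
    return vec_perm(hi, lo, vec_lvsl(0, p));
}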
Beispiel #21
0
vector unsigned short
testsra_unsigned (vector unsigned short x, vector unsigned short y)
{
  return vec_sra (x, y);
}
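vec_sra is an element-wise arithmetic right shift: each element of x is shifted by the corresponding element of y, with the count taken modulo the element width (16 for halfwords). A worked example with illustrative values:

static vector signed short demo_sra(void)
{
    vector signed short   x = { -32768, 16384, -4, 4, 1, -1, 256, -256 };
    vector unsigned short y = {      1,     2,  1, 1, 0, 15,   4,    4 };
    return vec_sra(x, y);   /* { -16384, 4096, -2, 2, 1, -1, 16, -16 } */
}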
Beispiel #22
0
void
jsimd_quantize_altivec (JCOEFPTR coef_block, DCTELEM * divisors,
                        DCTELEM * workspace)
{
  __vector short row0, row1, row2, row3, row4, row5, row6, row7,
    row0s, row1s, row2s, row3s, row4s, row5s, row6s, row7s,
    corr0, corr1, corr2, corr3, corr4, corr5, corr6, corr7,
    recip0, recip1, recip2, recip3, recip4, recip5, recip6, recip7,
    scale0, scale1, scale2, scale3, scale4, scale5, scale6, scale7;
  __vector unsigned int tmpe, tmpo;

  /* Constants */
  __vector unsigned short pw_word_bit_m1 = { __8X(WORD_BIT - 1) };
#if __BIG_ENDIAN__
  __vector unsigned char shift_pack_index =
    {0,1,16,17,4,5,20,21,8,9,24,25,12,13,28,29};
#else
  __vector unsigned char shift_pack_index =
    {2,3,18,19,6,7,22,23,10,11,26,27,14,15,30,31};
#endif

  row0 = vec_ld(0, workspace);
  row1 = vec_ld(16, workspace);
  row2 = vec_ld(32, workspace);
  row3 = vec_ld(48, workspace);
  row4 = vec_ld(64, workspace);
  row5 = vec_ld(80, workspace);
  row6 = vec_ld(96, workspace);
  row7 = vec_ld(112, workspace);

  /* Branch-less absolute value */
  row0s = vec_sra(row0, pw_word_bit_m1);
  row1s = vec_sra(row1, pw_word_bit_m1);
  row2s = vec_sra(row2, pw_word_bit_m1);
  row3s = vec_sra(row3, pw_word_bit_m1);
  row4s = vec_sra(row4, pw_word_bit_m1);
  row5s = vec_sra(row5, pw_word_bit_m1);
  row6s = vec_sra(row6, pw_word_bit_m1);
  row7s = vec_sra(row7, pw_word_bit_m1);
  row0 = vec_xor(row0, row0s);
  row1 = vec_xor(row1, row1s);
  row2 = vec_xor(row2, row2s);
  row3 = vec_xor(row3, row3s);
  row4 = vec_xor(row4, row4s);
  row5 = vec_xor(row5, row5s);
  row6 = vec_xor(row6, row6s);
  row7 = vec_xor(row7, row7s);
  row0 = vec_sub(row0, row0s);
  row1 = vec_sub(row1, row1s);
  row2 = vec_sub(row2, row2s);
  row3 = vec_sub(row3, row3s);
  row4 = vec_sub(row4, row4s);
  row5 = vec_sub(row5, row5s);
  row6 = vec_sub(row6, row6s);
  row7 = vec_sub(row7, row7s);

  corr0 = vec_ld(DCTSIZE2 * 2, divisors);
  corr1 = vec_ld(DCTSIZE2 * 2 + 16, divisors);
  corr2 = vec_ld(DCTSIZE2 * 2 + 32, divisors);
  corr3 = vec_ld(DCTSIZE2 * 2 + 48, divisors);
  corr4 = vec_ld(DCTSIZE2 * 2 + 64, divisors);
  corr5 = vec_ld(DCTSIZE2 * 2 + 80, divisors);
  corr6 = vec_ld(DCTSIZE2 * 2 + 96, divisors);
  corr7 = vec_ld(DCTSIZE2 * 2 + 112, divisors);

  row0 = vec_add(row0, corr0);
  row1 = vec_add(row1, corr1);
  row2 = vec_add(row2, corr2);
  row3 = vec_add(row3, corr3);
  row4 = vec_add(row4, corr4);
  row5 = vec_add(row5, corr5);
  row6 = vec_add(row6, corr6);
  row7 = vec_add(row7, corr7);

  recip0 = vec_ld(0, divisors);
  recip1 = vec_ld(16, divisors);
  recip2 = vec_ld(32, divisors);
  recip3 = vec_ld(48, divisors);
  recip4 = vec_ld(64, divisors);
  recip5 = vec_ld(80, divisors);
  recip6 = vec_ld(96, divisors);
  recip7 = vec_ld(112, divisors);

  MULTIPLY(row0, recip0, row0);
  MULTIPLY(row1, recip1, row1);
  MULTIPLY(row2, recip2, row2);
  MULTIPLY(row3, recip3, row3);
  MULTIPLY(row4, recip4, row4);
  MULTIPLY(row5, recip5, row5);
  MULTIPLY(row6, recip6, row6);
  MULTIPLY(row7, recip7, row7);

  scale0 = vec_ld(DCTSIZE2 * 4, divisors);
  scale1 = vec_ld(DCTSIZE2 * 4 + 16, divisors);
  scale2 = vec_ld(DCTSIZE2 * 4 + 32, divisors);
  scale3 = vec_ld(DCTSIZE2 * 4 + 48, divisors);
  scale4 = vec_ld(DCTSIZE2 * 4 + 64, divisors);
  scale5 = vec_ld(DCTSIZE2 * 4 + 80, divisors);
  scale6 = vec_ld(DCTSIZE2 * 4 + 96, divisors);
  scale7 = vec_ld(DCTSIZE2 * 4 + 112, divisors);

  MULTIPLY(row0, scale0, row0);
  MULTIPLY(row1, scale1, row1);
  MULTIPLY(row2, scale2, row2);
  MULTIPLY(row3, scale3, row3);
  MULTIPLY(row4, scale4, row4);
  MULTIPLY(row5, scale5, row5);
  MULTIPLY(row6, scale6, row6);
  MULTIPLY(row7, scale7, row7);

  row0 = vec_xor(row0, row0s);
  row1 = vec_xor(row1, row1s);
  row2 = vec_xor(row2, row2s);
  row3 = vec_xor(row3, row3s);
  row4 = vec_xor(row4, row4s);
  row5 = vec_xor(row5, row5s);
  row6 = vec_xor(row6, row6s);
  row7 = vec_xor(row7, row7s);
  row0 = vec_sub(row0, row0s);
  row1 = vec_sub(row1, row1s);
  row2 = vec_sub(row2, row2s);
  row3 = vec_sub(row3, row3s);
  row4 = vec_sub(row4, row4s);
  row5 = vec_sub(row5, row5s);
  row6 = vec_sub(row6, row6s);
  row7 = vec_sub(row7, row7s);

  vec_st(row0, 0, coef_block);
  vec_st(row1, 16, coef_block);
  vec_st(row2, 32, coef_block);
  vec_st(row3, 48, coef_block);
  vec_st(row4, 64, coef_block);
  vec_st(row5, 80, coef_block);
  vec_st(row6, 96, coef_block);
  vec_st(row7, 112, coef_block);
}
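The xor/subtract pairs implement the standard branch-less absolute value: shifting arithmetically by WORD_BIT - 1 = 31 (which vec_sra reduces modulo 16 to 15 for halfword elements) yields 0 for non-negative inputs and all-ones for negative ones, and re-applying the same xor/subtract with the saved sign after quantization restores it. The scalar equivalent:

static short branchless_abs(short x)
{
    short s = x >> 15;              /* 0 if x >= 0, -1 (all ones) if x < 0 */
    return (short) ((x ^ s) - s);   /* identity when s == 0, negate when s == -1 */
}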
Beispiel #23
0
void S_PaintChannelFrom16_altivec( portable_samplepair_t paintbuffer[PAINTBUFFER_SIZE], int snd_vol, channel_t *ch, const sfx_t *sc, int count, int sampleOffset, int bufferOffset ) {
	int						data, aoff, boff;
	int						leftvol, rightvol;
	int						i, j;
	portable_samplepair_t	*samp;
	sndBuffer				*chunk;
	short					*samples;
	float					ooff, fdata[2], fdiv, fleftvol, frightvol;

	if (sc->soundChannels <= 0) {
		return;
	}

	samp = &paintbuffer[ bufferOffset ];

	if (ch->doppler) {
		sampleOffset = sampleOffset*ch->oldDopplerScale;
	}

	if ( sc->soundChannels == 2 ) {
		sampleOffset *= sc->soundChannels;

		if ( sampleOffset & 1 ) {
			sampleOffset &= ~1;
		}
	}

	chunk = sc->soundData;
	while (sampleOffset>=SND_CHUNK_SIZE) {
		chunk = chunk->next;
		sampleOffset -= SND_CHUNK_SIZE;
		if (!chunk) {
			chunk = sc->soundData;
		}
	}

	if (!ch->doppler || ch->dopplerScale==1.0f) {
		vector signed short volume_vec;
		vector unsigned int volume_shift;
		int vectorCount, samplesLeft, chunkSamplesLeft;
		leftvol = ch->leftvol*snd_vol;
		rightvol = ch->rightvol*snd_vol;
		samples = chunk->sndChunk;
		((short *)&volume_vec)[0] = leftvol;
		((short *)&volume_vec)[1] = leftvol;
		((short *)&volume_vec)[4] = leftvol;
		((short *)&volume_vec)[5] = leftvol;
		((short *)&volume_vec)[2] = rightvol;
		((short *)&volume_vec)[3] = rightvol;
		((short *)&volume_vec)[6] = rightvol;
		((short *)&volume_vec)[7] = rightvol;
		volume_shift = vec_splat_u32(8);
		i = 0;

		while(i < count) {
			/* Mix scalar samples until the destination is 32-byte aligned (the 0x1f mask) and a full 8-sample vector run fits in both the output and the current chunk */
			while(i < count && (((unsigned long)&samp[i] & 0x1f) || ((count-i) < 8) || ((SND_CHUNK_SIZE - sampleOffset) < 8))) {
				data  = samples[sampleOffset++];
				samp[i].left += (data * leftvol)>>8;

				if ( sc->soundChannels == 2 ) {
					data = samples[sampleOffset++];
				}
				samp[i].right += (data * rightvol)>>8;
	
				if (sampleOffset == SND_CHUNK_SIZE) {
					chunk = chunk->next;
					samples = chunk->sndChunk;
					sampleOffset = 0;
				}
				i++;
			}
			/* Destination is now aligned.  Process as many 8-sample 
			   chunks as we can before we run out of room from the current
			   sound chunk.  We do 8 per loop to avoid extra source data reads. */
			samplesLeft = count - i;
			chunkSamplesLeft = SND_CHUNK_SIZE - sampleOffset;
			if(samplesLeft > chunkSamplesLeft)
				samplesLeft = chunkSamplesLeft;
			
			vectorCount = samplesLeft / 8;
			
			if(vectorCount)
			{
				vector unsigned char tmp;
				vector short s0, s1, sampleData0, sampleData1;
				vector signed int merge0, merge1;
				vector signed int d0, d1, d2, d3;				
				vector unsigned char samplePermute0 =
					VECCONST_UINT8(0, 1, 4, 5, 0, 1, 4, 5, 2, 3, 6, 7, 2, 3, 6, 7);
				vector unsigned char samplePermute1 = 
					VECCONST_UINT8(8, 9, 12, 13, 8, 9, 12, 13, 10, 11, 14, 15, 10, 11, 14, 15);
				vector unsigned char loadPermute0, loadPermute1;
				
				// Rather than permute the vectors after we load them to do the sample
				// replication and rearrangement, we permute the alignment vector so
				// we do everything in one step below and avoid data shuffling.
				tmp = vec_lvsl(0,&samples[sampleOffset]);								
				loadPermute0 = vec_perm(tmp,tmp,samplePermute0);
				loadPermute1 = vec_perm(tmp,tmp,samplePermute1);
				
				s0 = *(vector short *)&samples[sampleOffset];
				while(vectorCount)
				{
					/* Load up source (16-bit) sample data */
					s1 = *(vector short *)&samples[sampleOffset+7];
					
					/* Load up destination sample data */
					d0 = *(vector signed int *)&samp[i];
					d1 = *(vector signed int *)&samp[i+2];
					d2 = *(vector signed int *)&samp[i+4];
					d3 = *(vector signed int *)&samp[i+6];

					sampleData0 = vec_perm(s0,s1,loadPermute0);
					sampleData1 = vec_perm(s0,s1,loadPermute1);
					
					merge0 = vec_mule(sampleData0,volume_vec);
					merge0 = vec_sra(merge0,volume_shift);	/* Shift down to proper range */
					
					merge1 = vec_mulo(sampleData0,volume_vec);
					merge1 = vec_sra(merge1,volume_shift);
					
					d0 = vec_add(merge0,d0);
					d1 = vec_add(merge1,d1);
					
					merge0 = vec_mule(sampleData1,volume_vec);
					merge0 = vec_sra(merge0,volume_shift);	/* Shift down to proper range */
					
					merge1 = vec_mulo(sampleData1,volume_vec);
					merge1 = vec_sra(merge1,volume_shift);					

					d2 = vec_add(merge0,d2);
					d3 = vec_add(merge1,d3);

					/* Store destination sample data */
					*(vector signed int *)&samp[i] = d0;
					*(vector signed int *)&samp[i+2] = d1;
					*(vector signed int *)&samp[i+4] = d2;
					*(vector signed int *)&samp[i+6] = d3;

					i += 8;
					vectorCount--;
					s0 = s1;
					sampleOffset += 8;
				}
				if (sampleOffset == SND_CHUNK_SIZE) {
					chunk = chunk->next;
					samples = chunk->sndChunk;
					sampleOffset = 0;
				}
			}
		}
	} else {
		/* doppler-scaled float resampling path (truncated here) */
	}
}
Beispiel #24
0
/* this code assumes that stride % 16 == 0 */
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                          ((x) * (8 - y)),
                          ((8 - x) * (y)),
                          ((x) * (y))};
    register int i;
    vec_u8_t fperm;
    const vec_s32_t vABCD = vec_ld(0, ABCD);
    const vec_s16_t vA = vec_splat((vec_s16_t)vABCD, 1);
    const vec_s16_t vB = vec_splat((vec_s16_t)vABCD, 3);
    const vec_s16_t vC = vec_splat((vec_s16_t)vABCD, 5);
    const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
    LOAD_ZERO;
    const vec_s16_t v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16_t v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8_t vsrc0uc, vsrc1uc;
    vec_s16_t vsrc0ssH, vsrc1ssH;
    vec_u8_t vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16_t vsrc2ssH, vsrc3ssH, psum;
    vec_u8_t vdst, ppsum, vfdst, fsum;

  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);

    if (((unsigned long)dst) % 16 == 0) {
      fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
                            0x14, 0x15, 0x16, 0x17,
                            0x08, 0x09, 0x0A, 0x0B,
                            0x0C, 0x0D, 0x0E, 0x0F);
    } else {
      fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
                            0x04, 0x05, 0x06, 0x07,
                            0x18, 0x19, 0x1A, 0x1B,
                            0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
      vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
      vsrc1uc = vsrcBuc;
    else
      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc0uc);
    vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc1uc);

    if (!loadSecond) { /* -> !reallyBadAlign */
      for (i = 0 ; i < h ; i++) {


        vsrcCuc = vec_ld(stride + 0, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc2uc);
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sra(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vec_u8_t)vec_packsu(psum, psum);
        vfdst = vec_perm(vdst, ppsum, fperm);

        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    } else {
        vec_u8_t vsrcDuc;
      for (i = 0 ; i < h ; i++) {
        vsrcCuc = vec_ld(stride + 0, src);
        vsrcDuc = vec_ld(stride + 16, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
        if (reallyBadAlign)
          vsrc3uc = vsrcDuc;
        else
          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc2uc);
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sr(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vec_u8_t)vec_pack(psum, psum);
        vfdst = vec_perm(vdst, ppsum, fperm);

        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
}
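The ABCD weights implement bilinear interpolation at a 1/8-pel fractional position: since A + B + C + D = 64, the value (A*s00 + B*s01 + C*s10 + D*s11 + 32) >> 6 always fits in a byte, so the saturating pack never actually clips. A scalar reference with a hypothetical helper name:

static unsigned char chroma_bilin(const unsigned char *src, int stride,
                                  int x, int y)   /* 0 <= x, y < 8 */
{
    int A = (8 - x) * (8 - y);
    int B =      x  * (8 - y);
    int C = (8 - x) *      y;
    int D =      x  *      y;
    return (unsigned char) ((A * src[0]      + B * src[1] +
                             C * src[stride] + D * src[stride + 1] + 32) >> 6);
}

The no-rounding variant in the next example differs only in the bias: its v28ss constant is built as 32 - 4 = 28.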
Beispiel #25
0
/* this code assumes that stride % 16 == 0 */
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
    signed int ABCD[4] __attribute__((aligned(16))) =
                        {((8 - x) * (8 - y)),
                          ((x) * (8 - y)),
                          ((8 - x) * (y)),
                          ((x) * (y))};
    register int i;
    vector unsigned char fperm;
    const vector signed int vABCD = vec_ld(0, ABCD);
    const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
    const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
    const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
    const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
    const vector signed int vzero = vec_splat_s32(0);
    const vector signed short v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vector unsigned short v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vector unsigned char vsrc0uc, vsrc1uc;
    vector signed short vsrc0ssH, vsrc1ssH;
    vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc;
    vector signed short vsrc2ssH, vsrc3ssH, psum;
    vector unsigned char vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
      fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
                                        0x14, 0x15, 0x16, 0x17,
                                        0x08, 0x09, 0x0A, 0x0B,
                                        0x0C, 0x0D, 0x0E, 0x0F);
    } else {
      fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
                                        0x04, 0x05, 0x06, 0x07,
                                        0x18, 0x19, 0x1A, 0x1B,
                                        0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
      vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
      vsrc1uc = vsrcBuc;
    else
      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                               (vector unsigned char)vsrc0uc);
    vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                               (vector unsigned char)vsrc1uc);

    if (!loadSecond) { /* -> !reallyBadAlign */
      for (i = 0 ; i < h ; i++) {


        vsrcCuc = vec_ld(stride + 0, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

        vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                (vector unsigned char)vsrc2uc);
        vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                (vector unsigned char)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v28ss, psum);
        psum = vec_sra(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vector unsigned char)vec_packsu(psum, psum);
        fsum = vec_perm(vdst, ppsum, fperm);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    } else {
        vector unsigned char vsrcDuc;
      for (i = 0 ; i < h ; i++) {
        vsrcCuc = vec_ld(stride + 0, src);
        vsrcDuc = vec_ld(stride + 16, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
        if (reallyBadAlign)
          vsrc3uc = vsrcDuc;
        else
          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

        vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                (vector unsigned char)vsrc2uc);
        vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                (vector unsigned char)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v28ss, psum);
        psum = vec_sr(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vector unsigned char)vec_pack(psum, psum);
        fsum = vec_perm(vdst, ppsum, fperm);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    }
}
Beispiel #26
0
void iquant_intra_m1_altivec(IQUANT_INTRA_PDECL)
{
    int i;
    vector signed short vsrc;
    uint16_t *qmat;
    vector unsigned short vqmat;
    vector unsigned short vmquant;
    vector bool short eqzero, ltzero;
    vector signed short val, t0;
    vector signed short zero, one;
    vector unsigned int four;
    vector signed short min, max;
    int offset, offset2;
    int16_t dst0;
    union {
	vector unsigned short vu16;
	unsigned short mquant;
	vector signed int vs32;
	struct {
	    signed int pad[3];
	    signed int sum;
	} s;
    } vu;
#ifdef ALTIVEC_DST
    DataStreamControl dsc;
#endif

#ifdef ALTIVEC_VERIFY /* {{{ */
    if (NOT_VECTOR_ALIGNED(wsp->intra_q_mat))
	mjpeg_error_exit1("iquant_intra_m1: wsp->intra_q_mat %% 16 != 0, (%d)",
	    wsp->intra_q_mat);

    if (NOT_VECTOR_ALIGNED(src))
	mjpeg_error_exit1("iquant_intra_m1: src %% 16 != 0, (%d)", src);

    if (NOT_VECTOR_ALIGNED(dst))
	mjpeg_error_exit1("iquant_intra_m1: dst %% 16 != 0, (%d)", dst);

    for (i = 0; i < 64; i++)
	if (src[i] < -256 || src[i] > 255)
	    mjpeg_error_exit1("iquant_intra_m2: -256 > src[%i] > 255, (%d)",
		i, src[i]);
#endif /* }}} */

    AMBER_START;

    dst0 = src[0] << (3 - dc_prec);

    qmat = (uint16_t*)wsp->intra_q_mat;

#ifdef ALTIVEC_DST
    dsc.control = DATA_STREAM_CONTROL(64/8,1,0);
    vec_dst(src, dsc.control, 0);
    vec_dst(qmat, dsc.control, 1);
#endif

    /* vmquant = (vector unsigned short)(mquant); */
    vu.mquant = (unsigned short)mquant;
    vmquant = vec_splat(vu.vu16, 0);

    zero = vec_splat_s16(0);
    one = vec_splat_s16(1);
    four = vec_splat_u32(4);
    /* max = (2047); min = (-2048); {{{ */
    vu8(max) = vec_splat_u8(0x7);
    t0 = vec_splat_s16(-1); /* 0xffff */
    vu8(max) = vec_mergeh(vu8(max), vu8(t0)); /* 0x07ff == 2047 */
    min = vec_sub(t0, max);
    /* }}} */
    offset = 0;

#if 1
    vsrc = vec_ld(offset, (signed short*)src);
    vqmat = vec_ld(offset, (unsigned short*)qmat);
    i = (64/8) - 1;
    do {
	/* intra_q[i] * mquant */
	vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

	/* save sign */
	ltzero = vec_cmplt(vsrc, zero);
	eqzero = vec_cmpeq(vsrc, zero);

	/* val = abs(src) */
	t0 = vec_sub(zero, vsrc);
	val = vec_max(t0, vsrc);

	/* val = (src * quant) >> 4 */
	vs32(t0) = vec_mule(val, vs16(vqmat));
	vs32(val) = vec_mulo(val, vs16(vqmat));
	vs32(t0) = vec_sra(vs32(t0), four);
	vs16(t0) = vec_pack(vs32(t0), vs32(t0));
	vs32(val) = vec_sra(vs32(val), four);
	vs16(val) = vec_pack(vs32(val), vs32(val));
	val = vec_mergeh(vs16(t0), vs16(val));

	offset2 = offset;
	offset += 8*sizeof(int16_t);
	vsrc = vec_ld(offset, (signed short*)src);
	vqmat = vec_ld(offset, (unsigned short*)qmat);

	/* val = val - 1&~(val|val==0) */
	t0 = vec_or(val, eqzero);
	t0 = vec_andc(one, t0);
	val = vec_sub(val, t0);

	/* restore sign */
	t0 = vec_sub(zero, val);
	val = vec_sel(val, t0, ltzero);

	/* val = (val > 2047) ? 2047 : ((val < -2048) ? -2048 : val); */
	val = vec_min(val, max);
	val = vec_max(val, min);

	vec_st(val, offset2, dst);
    } while (--i);
    /* intra_q[i] * mquant */
    vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

    /* save sign */
    ltzero = vec_cmplt(vsrc, zero);
    eqzero = vec_cmpeq(vsrc, zero);

    /* val = abs(src) */
    t0 = vec_sub(zero, vsrc);
    val = vec_max(t0, vsrc);

    /* val = (src * quant) >> 4 */
    vs32(t0) = vec_mule(val, vs16(vqmat));
    vs32(val) = vec_mulo(val, vs16(vqmat));
    vs32(t0) = vec_sra(vs32(t0), four);
    vs16(t0) = vec_pack(vs32(t0), vs32(t0));
    vs32(val) = vec_sra(vs32(val), four);
    vs16(val) = vec_pack(vs32(val), vs32(val));
    val = vec_mergeh(vs16(t0), vs16(val));

    /* val = val - 1&~(val|val==0) */
    t0 = vec_or(val, eqzero);
    t0 = vec_andc(one, t0);
    val = vec_sub(val, t0);

    /* restore sign */
    t0 = vec_sub(zero, val);
    val = vec_sel(val, t0, ltzero);

    /* val = (val > 2047) ? 2047 : ((val < -2048) ? -2048 : val); */
    val = vec_min(val, max);
    val = vec_max(val, min);

    vec_st(val, offset, dst);
#else
    /* {{{ */
    i = (64/8);
    do {
	vsrc = vec_ld(offset, (signed short*)src);
	vqmat = vec_ld(offset, (unsigned short*)qmat);

	/* intra_q[i] * mquant */
	vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

	/* save sign */
	ltzero = vec_cmplt(vsrc, zero);
	eqzero = vec_cmpeq(vsrc, zero);

	/* val = abs(src) */
	t0 = vec_sub(zero, vsrc);
	val = vec_max(t0, vsrc);

	/* val = (src * quant) >> 4 */
	vs32(t0) = vec_mule(val, vs16(vqmat));
	vs32(val) = vec_mulo(val, vs16(vqmat));
	vs32(t0) = vec_sra(vs32(t0), four);
	vs16(t0) = vec_pack(vs32(t0), vs32(t0));
	vs32(val) = vec_sra(vs32(val), four);
	vs16(val) = vec_pack(vs32(val), vs32(val));
	val = vec_mergeh(vs16(t0), vs16(val));

	/* val = val - 1&~(val|val==0) */
	t0 = vec_or(val, eqzero);
	t0 = vec_andc(one, t0);
	val = vec_sub(val, t0);

	/* restore sign */
	t0 = vec_sub(zero, val);
	val = vec_sel(val, t0, ltzero);

	/* val = (val > 2047) ? 2047 : ((val < -2048) ? -2048 : val); */
	val = vec_min(val, max);
	val = vec_max(val, min);

	vec_st(val, offset, dst);

	offset += 8*sizeof(int16_t);
    } while (--i);
    /* }}} */
#endif

    dst[0] = dst0;

    AMBER_STOP;
}
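Per coefficient this is MPEG-1 intra inverse quantization: scale by intra_q[i] * mquant, shift right by 4, force nonzero magnitudes odd (the mismatch control the comments allude to), restore the sign, and saturate to the 12-bit range. A hypothetical scalar sketch of the same math:

#include <stdlib.h>

static short iquant_intra_m1_ref(short src, unsigned short qmat,
                                 unsigned short mquant)
{
    int neg = src < 0;
    int val = (abs(src) * (qmat * mquant)) >> 4;
    if (val && !(val & 1))
        val -= 1;                     /* mismatch control: make it odd */
    if (neg)
        val = -val;
    if (val >  2047) val =  2047;     /* saturate to [-2048, 2047] */
    if (val < -2048) val = -2048;
    return (short) val;
}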
Beispiel #27
0
static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
    register int i;
    LOAD_ZERO;
    const vec_u8 permM2 = vec_lvsl(-2, src);
    const vec_u8 permM1 = vec_lvsl(-1, src);
    const vec_u8 permP0 = vec_lvsl(+0, src);
    const vec_u8 permP1 = vec_lvsl(+1, src);
    const vec_u8 permP2 = vec_lvsl(+2, src);
    const vec_u8 permP3 = vec_lvsl(+3, src);
    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_u32 v10ui = vec_splat_u32(10);
    const vec_s16 v5ss = vec_splat_s16(5);
    const vec_s16 v1ss = vec_splat_s16(1);
    const vec_s32 v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
    const vec_u32 v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));

    register int align = ((((unsigned long)src) - 2) % 16);

    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
              srcP2A, srcP2B, srcP3A, srcP3B,
              srcM1A, srcM1B, srcM2A, srcM2B,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
              pp1A, pp1B, pp2A, pp2B, psumA, psumB;

    const vec_u8 mperm = (const vec_u8)
        {0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
         0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F};
    int16_t *tmpbis = tmp;

    vec_s16 tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
              tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
              tmpP2ssA, tmpP2ssB;

    vec_s32 pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
              pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
              pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
              ssumAe, ssumAo, ssumBe, ssumBo;
    vec_u8 fsum, sumv, sum;
    vec_s16 ssume, ssumo;

    src -= (2 * srcStride);
    for (i = 0 ; i < 21 ; i ++) {
        vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
        vec_u8 srcR1 = vec_ld(-2, src);
        vec_u8 srcR2 = vec_ld(14, src);

        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
        srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
        srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
        srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);

        srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
        srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
        srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
        srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);

        srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
        srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
        srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
        srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);

        sum1A = vec_adds(srcP0A, srcP1A);
        sum1B = vec_adds(srcP0B, srcP1B);
        sum2A = vec_adds(srcM1A, srcP2A);
        sum2B = vec_adds(srcM1B, srcP2B);
        sum3A = vec_adds(srcM2A, srcP3A);
        sum3B = vec_adds(srcM2B, srcP3B);

        pp1A = vec_mladd(sum1A, v20ss, sum3A);
        pp1B = vec_mladd(sum1B, v20ss, sum3B);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        psumA = vec_sub(pp1A, pp2A);
        psumB = vec_sub(pp1B, pp2B);

        vec_st(psumA, 0, tmp);
        vec_st(psumB, 16, tmp);

        src += srcStride;
        tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
    }

    tmpM2ssA = vec_ld(0, tmpbis);
    tmpM2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpM1ssA = vec_ld(0, tmpbis);
    tmpM1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP0ssA = vec_ld(0, tmpbis);
    tmpP0ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP1ssA = vec_ld(0, tmpbis);
    tmpP1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP2ssA = vec_ld(0, tmpbis);
    tmpP2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;

    for (i = 0 ; i < 16 ; i++) {
        const vec_s16 tmpP3ssA = vec_ld(0, tmpbis);
        const vec_s16 tmpP3ssB = vec_ld(16, tmpbis);

        const vec_s16 sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
        const vec_s16 sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
        const vec_s16 sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
        const vec_s16 sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
        const vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
        const vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);

        tmpbis += tmpStride;

        tmpM2ssA = tmpM1ssA;
        tmpM2ssB = tmpM1ssB;
        tmpM1ssA = tmpP0ssA;
        tmpM1ssB = tmpP0ssB;
        tmpP0ssA = tmpP1ssA;
        tmpP0ssB = tmpP1ssB;
        tmpP1ssA = tmpP2ssA;
        tmpP1ssB = tmpP2ssB;
        tmpP2ssA = tmpP3ssA;
        tmpP2ssB = tmpP3ssB;

        pp1Ae = vec_mule(sum1A, v20ss);
        pp1Ao = vec_mulo(sum1A, v20ss);
        pp1Be = vec_mule(sum1B, v20ss);
        pp1Bo = vec_mulo(sum1B, v20ss);

        pp2Ae = vec_mule(sum2A, v5ss);
        pp2Ao = vec_mulo(sum2A, v5ss);
        pp2Be = vec_mule(sum2B, v5ss);
        pp2Bo = vec_mulo(sum2B, v5ss);

        pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
        pp3Ao = vec_mulo(sum3A, v1ss);
        pp3Be = vec_sra((vec_s32)sum3B, v16ui);
        pp3Bo = vec_mulo(sum3B, v1ss);

        pp1cAe = vec_add(pp1Ae, v512si);
        pp1cAo = vec_add(pp1Ao, v512si);
        pp1cBe = vec_add(pp1Be, v512si);
        pp1cBo = vec_add(pp1Bo, v512si);

        pp32Ae = vec_sub(pp3Ae, pp2Ae);
        pp32Ao = vec_sub(pp3Ao, pp2Ao);
        pp32Be = vec_sub(pp3Be, pp2Be);
        pp32Bo = vec_sub(pp3Bo, pp2Bo);

        sumAe = vec_add(pp1cAe, pp32Ae);
        sumAo = vec_add(pp1cAo, pp32Ao);
        sumBe = vec_add(pp1cBe, pp32Be);
        sumBo = vec_add(pp1cBo, pp32Bo);

        ssumAe = vec_sra(sumAe, v10ui);
        ssumAo = vec_sra(sumAo, v10ui);
        ssumBe = vec_sra(sumBe, v10ui);
        ssumBo = vec_sra(sumBo, v10ui);

        ssume = vec_packs(ssumAe, ssumBe);
        ssumo = vec_packs(ssumAo, ssumBo);

        sumv = vec_packsu(ssume, ssumo);
        sum = vec_perm(sumv, sumv, mperm);

        ASSERT_ALIGNED(dst);

        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));

        vec_st(fsum, 0, dst);

        dst += dstStride;
    }
}
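The second loop runs the same six-tap filter vertically over the 16-bit intermediates produced by the first pass, so the rounding constant grows to 512 (v512si) and the shift to 10 (5 bits per pass); the mule/mulo even-odd split is needed because these products no longer fit in 16 bits. A scalar sketch of one final sample:

#include <stdint.h>

static unsigned char lowpass6_pass2(const int16_t *tmp, int stride)
{
    int sum = (tmp[0]          + tmp[stride])     * 20
            - (tmp[-stride]    + tmp[2 * stride]) * 5
            + tmp[-2 * stride] + tmp[3 * stride]
            + 512;
    sum >>= 10;
    if (sum < 0)   sum = 0;
    if (sum > 255) sum = 255;
    return (unsigned char) sum;
}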
Beispiel #28
0
/* next one assumes that ((line_size % 16) == 0) */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
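Each output byte is the average of a 2x2 pixel neighbourhood; the "no rounding" variant biases by +1 instead of +2 before the shift, which is what the vcone additions carry from row to row. The scalar equivalent of one pixel:

static unsigned char avg4_no_rnd(const unsigned char *p, int stride)
{
    return (unsigned char) ((p[0] + p[1] + p[stride] + p[stride + 1] + 1) >> 2);
}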
Beispiel #29
0
// CHECK-LABEL: define void @test1
void test1() {

  /* vec_cmpeq */
  res_vbll = vec_cmpeq(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd
// CHECK-LE: @llvm.ppc.altivec.vcmpequd
// CHECK-PPC: error: call to 'vec_cmpeq' is ambiguous

  res_vbll = vec_cmpeq(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd
// CHECK-LE: @llvm.ppc.altivec.vcmpequd
// CHECK-PPC: error: call to 'vec_cmpeq' is ambiguous

  /* vec_cmpgt */
  res_vbll = vec_cmpgt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd
// CHECK-PPC: error: call to 'vec_cmpgt' is ambiguous

  res_vbll = vec_cmpgt(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud
// CHECK-PPC: error: call to 'vec_cmpgt' is ambiguous

  /* ----------------------- predicates --------------------------- */
  /* vec_all_eq */
  res_i = vec_all_eq(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_eq' is ambiguous

  res_i = vec_all_eq(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_eq' is ambiguous

  res_i = vec_all_eq(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_eq' is ambiguous

  res_i = vec_all_eq(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_eq' is ambiguous

  res_i = vec_all_eq(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_eq' is ambiguous

  res_i = vec_all_eq(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_eq' is ambiguous

  res_i = vec_all_eq(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_eq' is ambiguous

  /* vec_all_ne */
  res_i = vec_all_ne(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_ne' is ambiguous

  res_i = vec_all_ne(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_ne' is ambiguous

  res_i = vec_all_ne(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_ne' is ambiguous

  res_i = vec_all_ne(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_ne' is ambiguous

  res_i = vec_all_ne(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_ne' is ambiguous

  res_i = vec_all_ne(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_ne' is ambiguous

  res_i = vec_all_ne(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_ne' is ambiguous

  /* vec_any_eq */
  res_i = vec_any_eq(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_eq' is ambiguous

  res_i = vec_any_eq(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_eq' is ambiguous

  res_i = vec_any_eq(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_eq' is ambiguous

  res_i = vec_any_eq(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_eq' is ambiguous

  res_i = vec_any_eq(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_eq' is ambiguous

  res_i = vec_any_eq(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_eq' is ambiguous

  res_i = vec_any_eq(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_eq' is ambiguous

  /* vec_any_ne */
  res_i = vec_any_ne(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_ne' is ambiguous

  res_i = vec_any_ne(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_ne' is ambiguous

  res_i = vec_any_ne(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_ne' is ambiguous

  res_i = vec_any_ne(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_ne' is ambiguous

  res_i = vec_any_ne(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_ne' is ambiguous

  res_i = vec_any_ne(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_ne' is ambiguous

  res_i = vec_any_ne(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_ne' is ambiguous

  /* vec_all_ge */
  res_i = vec_all_ge(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_all_ge' is ambiguous

  res_i = vec_all_ge(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_all_ge' is ambiguous

  res_i = vec_all_ge(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_ge' is ambiguous

  res_i = vec_all_ge(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_ge' is ambiguous

  res_i = vec_all_ge(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_ge' is ambiguous

  res_i = vec_all_ge(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_ge' is ambiguous

  res_i = vec_all_ge(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_ge' is ambiguous

  /* vec_all_gt */
  res_i = vec_all_gt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_all_gt' is ambiguous

  res_i = vec_all_gt(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_all_gt' is ambiguous

  res_i = vec_all_gt(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_gt' is ambiguous

  res_i = vec_all_gt(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_gt' is ambiguous

  res_i = vec_all_gt(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_gt' is ambiguous

  res_i = vec_all_gt(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_gt' is ambiguous

  res_i = vec_all_gt(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_gt' is ambiguous

  /* vec_all_le */
  res_i = vec_all_le(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_all_le' is ambiguous

  res_i = vec_all_le(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_all_le' is ambiguous

  res_i = vec_all_le(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_le' is ambiguous

  res_i = vec_all_le(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_le' is ambiguous

  res_i = vec_all_le(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_le' is ambiguous

  res_i = vec_all_le(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_le' is ambiguous

  res_i = vec_all_le(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_le' is ambiguous

  /* vec_all_lt */
  res_i = vec_all_lt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_all_lt' is ambiguous

  res_i = vec_all_lt(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_all_lt' is ambiguous

  res_i = vec_all_lt(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_lt' is ambiguous

  res_i = vec_all_lt(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_lt' is ambiguous

  res_i = vec_all_lt(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_lt' is ambiguous

  res_i = vec_all_lt(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_lt' is ambiguous

  res_i = vec_all_lt(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_lt' is ambiguous

  /* vec_any_ge */
  res_i = vec_any_ge(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_any_ge' is ambiguous

  res_i = vec_any_ge(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_any_ge' is ambiguous

  res_i = vec_any_ge(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_ge' is ambiguous

  res_i = vec_any_ge(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_ge' is ambiguous

  res_i = vec_any_ge(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_ge' is ambiguous

  res_i = vec_any_ge(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_ge' is ambiguous

  res_i = vec_any_ge(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_ge' is ambiguous

  /* vec_any_gt */
  res_i = vec_any_gt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_any_gt' is ambiguous

  res_i = vec_any_gt(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_any_gt' is ambiguous

  res_i = vec_any_gt(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_gt' is ambiguous

  res_i = vec_any_gt(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_gt' is ambiguous

  res_i = vec_any_gt(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_gt' is ambiguous

  res_i = vec_any_gt(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_gt' is ambiguous

  res_i = vec_any_gt(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_gt' is ambiguous

  /* vec_any_le */
  res_i = vec_any_le(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_any_le' is ambiguous

  res_i = vec_any_le(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_any_le' is ambiguous

  res_i = vec_any_le(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_le' is ambiguous

  res_i = vec_any_le(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_le' is ambiguous

  res_i = vec_any_le(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_le' is ambiguous

  res_i = vec_any_le(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_le' is ambiguous

  res_i = vec_any_le(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_le' is ambiguous

  /* vec_any_lt */
  res_i = vec_any_lt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_any_lt' is ambiguous

  res_i = vec_any_lt(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p
// CHECK-PPC: error: call to 'vec_any_lt' is ambiguous

  res_i = vec_any_lt(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_lt' is ambiguous

  res_i = vec_any_lt(vull, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_lt' is ambiguous

  res_i = vec_any_lt(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_lt' is ambiguous

  res_i = vec_any_lt(vbll, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_lt' is ambiguous

  res_i = vec_any_lt(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_lt' is ambiguous

  /* vec_max */
  res_vsll = vec_max(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vsll = vec_max(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vsll = vec_max(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vull = vec_max(vull, vull);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vull = vec_max(vbll, vull);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vull = vec_max(vull, vbll);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  /* vec_min */
  res_vsll = vec_min(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vsll = vec_min(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vsll = vec_min(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vull = vec_min(vull, vull);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vull = vec_min(vbll, vull);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vull = vec_min(vull, vbll);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  /* vec_mule */
  res_vsll = vec_mule(vi, vi);
// CHECK: @llvm.ppc.altivec.vmulesw
// CHECK-LE: @llvm.ppc.altivec.vmulosw
// CHECK-PPC: error: call to 'vec_mule' is ambiguous

  res_vull = vec_mule(vui, vui);
// CHECK: @llvm.ppc.altivec.vmuleuw
// CHECK-LE: @llvm.ppc.altivec.vmulouw
// CHECK-PPC: error: call to 'vec_mule' is ambiguous

  /* vec_mulo */
  res_vsll = vec_mulo(vi, vi);
// CHECK: @llvm.ppc.altivec.vmulosw
// CHECK-LE: @llvm.ppc.altivec.vmulesw
// CHECK-PPC: error: call to 'vec_mulo' is ambiguous

  res_vull = vec_mulo(vui, vui);
// CHECK: @llvm.ppc.altivec.vmulouw
// CHECK-LE: @llvm.ppc.altivec.vmuleuw
// CHECK-PPC: error: call to 'vec_mulo' is ambiguous
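// vec_mule/vec_mulo are endian-sensitive: "even" and "odd" element positions
// are numbered from the other end on little-endian, so each call lowers to
// the opposite instruction there (vmulesw <-> vmulosw, vmuleuw <-> vmulouw),
// exactly as the CHECK-LE lines above expect.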

  /* vec_packs */
  res_vi = vec_packs(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vpksdss
// CHECK-LE: @llvm.ppc.altivec.vpksdss
// CHECK-PPC: error: call to 'vec_packs' is ambiguous

  res_vui = vec_packs(vull, vull);
// CHECK: @llvm.ppc.altivec.vpkudus
// CHECK-LE: @llvm.ppc.altivec.vpkudus
// CHECK-PPC: error: call to 'vec_packs' is ambiguous

  /* vec_packsu */
  res_vui = vec_packsu(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vpksdus
// CHECK-LE: @llvm.ppc.altivec.vpksdus
// CHECK-PPC: error: call to 'vec_packsu' is ambiguous

  res_vui = vec_packsu(vull, vull);
// CHECK: @llvm.ppc.altivec.vpkudus
// CHECK-LE: @llvm.ppc.altivec.vpkudus
// CHECK-PPC: error: call to 'vec_packsu' is ambiguous
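// For unsigned inputs, saturating pack and unsigned-saturating pack are the
// same operation, so vec_packs(vull, vull) and vec_packsu(vull, vull) both
// select vpkudus; only signed input needs the distinct vpksdss/vpksdus forms.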

  /* vec_rl */
  res_vsll = vec_rl(vsll, vull);
// CHECK: @llvm.ppc.altivec.vrld
// CHECK-LE: @llvm.ppc.altivec.vrld
// CHECK-PPC: error: call to 'vec_rl' is ambiguous

  res_vull = vec_rl(vull, vull);
// CHECK: @llvm.ppc.altivec.vrld
// CHECK-LE: @llvm.ppc.altivec.vrld
// CHECK-PPC: error: call to 'vec_rl' is ambiguous

  /* vec_sl */
  res_vsll = vec_sl(vsll, vull);
// CHECK: shl <2 x i64>
// CHECK-LE: shl <2 x i64>
// CHECK-PPC: error: call to 'vec_sl' is ambiguous

  res_vull = vec_sl(vull, vull);
// CHECK: shl <2 x i64>
// CHECK-LE: shl <2 x i64>
// CHECK-PPC: error: call to 'vec_sl' is ambiguous

  /* vec_sr */
  res_vsll = vec_sr(vsll, vull);
// CHECK: ashr <2 x i64>
// CHECK-LE: ashr <2 x i64>
// CHECK-PPC: error: call to 'vec_sr' is ambiguous

  res_vull = vec_sr(vull, vull);
// CHECK: lshr <2 x i64>
// CHECK-LE: lshr <2 x i64>
// CHECK-PPC: error: call to 'vec_sr' is ambiguous

  /* vec_sra */
  res_vsll = vec_sra(vsll, vull);
// CHECK: ashr <2 x i64>
// CHECK-LE: ashr <2 x i64>
// CHECK-PPC: error: call to 'vec_sra' is ambiguous

  res_vull = vec_sra(vull, vull);
// CHECK: ashr <2 x i64>
// CHECK-LE: ashr <2 x i64>
// CHECK-PPC: error: call to 'vec_sra' is ambiguous
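// Element-wise shifts need no target intrinsic and lower to plain IR shift
// instructions. Two quirks are visible above: this snapshot emits ashr for
// vec_sr on signed elements (the ISA defines vec_sr as a logical shift, and
// later clang versions emit lshr for both signednesses), while vec_sra is
// always an arithmetic shift, even for unsigned element types.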

  /* vec_unpackh */
  res_vsll = vec_unpackh(vi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw
// CHECK-PPC: error: call to 'vec_unpackh' is ambiguous

  res_vbll = vec_unpackh(vbi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw
// CHECK-PPC: error: call to 'vec_unpackh' is ambiguous

  /* vec_unpackl */
  res_vsll = vec_unpackl(vi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw
// CHECK-PPC: error: call to 'vec_unpackl' is ambiguous

  res_vbll = vec_unpackl(vbi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw
// CHECK-PPC: error: call to 'vec_unpackl' is ambiguous
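// Like vec_mule/vec_mulo, the unpack-high/unpack-low pair swaps on
// little-endian (vupkhsw <-> vupklsw), since the "high" and "low" halves
// trade places in LE element numbering.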

  /* vec_vpksdss */
  res_vi = vec_vpksdss(vsll, vsll);
// CHECK: llvm.ppc.altivec.vpksdss
// CHECK-LE: llvm.ppc.altivec.vpksdss
// CHECK-PPC: warning: implicit declaration of function 'vec_vpksdss'

  /* vec_vpksdus */
  res_vui = vec_vpksdus(vsll, vsll);
// CHECK: llvm.ppc.altivec.vpksdus
// CHECK-LE: llvm.ppc.altivec.vpksdus
// CHECK-PPC: warning: implicit declaration of function 'vec_vpksdus'

  /* vec_vpkudum */
  res_vi = vec_vpkudum(vsll, vsll);
// CHECK: vperm
// CHECK-LE: vperm
// CHECK-PPC: warning: implicit declaration of function 'vec_vpkudum'

  res_vui = vec_vpkudum(vull, vull);
// CHECK: vperm
// CHECK-LE: vperm
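// There is no dedicated builtin for the modulo pack: vec_vpkudum just
// gathers the low word of each doubleword, which the headers express as a
// permute, hence the vperm in both CHECK lines.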

  res_vui = vec_vpkudus(vull, vull);
// CHECK: llvm.ppc.altivec.vpkudus
// CHECK-LE: llvm.ppc.altivec.vpkudus
// CHECK-PPC: warning: implicit declaration of function 'vec_vpkudus'

  /* vec_vupkhsw */
  res_vsll = vec_vupkhsw(vi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw
// CHECK-PPC: warning: implicit declaration of function 'vec_vupkhsw'

  res_vbll = vec_vupkhsw(vbi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw

  /* vec_vupklsw */
  res_vsll = vec_vupklsw(vi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw
// CHECK-PPC: warning: implicit declaration of function 'vec_vupklsw'

  res_vbll = vec_vupklsw(vbi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw

  /* vec_max */
  res_vsll = vec_max(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd

  res_vsll = vec_max(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd

  res_vsll = vec_max(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd

  res_vull = vec_max(vull, vull);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud

  res_vull = vec_max(vbll, vull);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud

  /* vec_min */
  res_vsll = vec_min(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd

  res_vsll = vec_min(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd

  res_vsll = vec_min(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd

  res_vull = vec_min(vull, vull);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud

  res_vull = vec_min(vbll, vull);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud

}
Beispiel #30
0
/* start of optimized motionblur */
void pix_motionblur :: processYUVAltivec(imageStruct &image)
{
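  /* Blend the current frame into a saved copy of the previous output:
   * out = (imageGain*in + rightGain*saved) >> 8, computed per byte on
   * interleaved UYVY data, with the 128 chroma bias removed before the
   * multiply and restored afterwards. */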
  int h,w,width;
  signed short rightGain,imageGain;
  unsigned char *saved = m_savedImage.data;

  m_savedImage.xsize=image.xsize;
  m_savedImage.ysize=image.ysize;
  m_savedImage.setCsizeByFormat(image.format);
  m_savedImage.reallocate();
  if(saved!=m_savedImage.data) {
    m_savedImage.setBlack();
  }
  saved=m_savedImage.data;

  width = image.xsize/8;
  /*
  // hmm: why does it read 235 ?
  rightGain = (signed short)(235. * m_motionblur);
  imageGain = (signed short) (255. - (235. * m_motionblur));
  */
  rightGain = m_blur1;
  imageGain = m_blur0;

  union {
    signed short elements[8];
    vector signed short v;
  } shortBuffer;

  union {
    unsigned int elements[4];
    vector unsigned int v;
  } bitBuffer;

  register vector signed short gainAdd, hiImage, loImage,hiRight,loRight,
           YImage, UVImage;
  // register vector signed short loadhiImage, loadloImage,loadhiRight,loadloRight;
  register vector unsigned char loadImage, loadRight;
  register vector unsigned char zero = vec_splat_u8(0);
  register vector signed int UVhi,UVlo,Yhi,Ylo;
  register vector signed int UVhiR,UVloR,YhiR,YloR;
  register vector signed short gainSub,gain,gainR;//,d;
  register vector unsigned int bitshift;
  vector unsigned char *inData = (vector unsigned char*) image.data;
  vector unsigned char *rightData = (vector unsigned char*) saved;


  shortBuffer.elements[0] = 128;
  shortBuffer.elements[1] = 0;
  shortBuffer.elements[2] = 128;
  shortBuffer.elements[3] = 0;
  shortBuffer.elements[4] = 128;
  shortBuffer.elements[5] = 0;
  shortBuffer.elements[6] = 128;
  shortBuffer.elements[7] = 0;

  gainSub = shortBuffer.v;

  shortBuffer.elements[0] = imageGain;
  gain = shortBuffer.v;
  gain =  vec_splat(gain, 0 );

  shortBuffer.elements[0] = rightGain;
  gainR = shortBuffer.v;
  gainR =  vec_splat(gainR, 0 );

  bitBuffer.elements[0] = 8;

  //Load it into the vector unit
  bitshift = bitBuffer.v;
  bitshift = vec_splat(bitshift,0);
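  // The products are shifted right by 8 after the blend, so the two gains
  // act as Q8 fixed-point weights; presumably m_blur0 + m_blur1 is kept
  // near 256 so the blend preserves overall brightness.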

  shortBuffer.elements[0] = 128;

  //Load it into the vector unit
  gainAdd = shortBuffer.v;
  gainAdd = (vector signed short)vec_splat((vector signed short)gainAdd,0);
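  // Note: gainAdd (128 in every lane after the splat) is prepared here but
  // never used below; the add-back at the bottom of the loop reuses gainSub,
  // which re-biases only the chroma lanes.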

# ifndef PPC970
  UInt32 prefetchSize = GetPrefetchConstant( 16, 1, 256 );
  vec_dst( inData, prefetchSize, 0 );
  vec_dst( rightData, prefetchSize, 1 );
  vec_dst( inData+32, prefetchSize, 2 );
  vec_dst( rightData+32, prefetchSize, 3 );
# endif

  loadImage = inData[0];
  loadRight = rightData[0];

  for ( h=0; h<image.ysize; h++) {
    for (w=0; w<width; w++) {
# ifndef PPC970
      vec_dst( inData, prefetchSize, 0 );
      vec_dst( rightData, prefetchSize, 1 );
      vec_dst( inData+32, prefetchSize, 2 );
      vec_dst( rightData+32, prefetchSize, 3 );
# endif
      //interleaved U Y V Y chars

      hiImage = (vector signed short) vec_mergeh( zero, loadImage );
      loImage = (vector signed short) vec_mergel( zero, loadImage );

      hiRight = (vector signed short) vec_mergeh( zero, loadRight );
      loRight = (vector signed short) vec_mergel( zero, loadRight );

      //hoist that load!!
      loadImage = inData[1];
      loadRight = rightData[1];
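
      // Note: this read-ahead fetches inData[1]/rightData[1] before knowing
      // whether another iteration follows, so the final iteration reads one
      // 16-byte vector past the end of each buffer (technically out of
      // bounds, unless the image buffers carry trailing slack).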

      //subtract 128 from UV

      hiImage = vec_subs(hiImage,gainSub);
      loImage = vec_subs(loImage,gainSub);

      hiRight = vec_subs(hiRight,gainSub);
      loRight = vec_subs(loRight,gainSub);

      //vec_mule picks the even (U/V) shorts and widens the gain-scaled
      //products into two vector ints
      UVhi = vec_mule(gain,hiImage);
      UVlo = vec_mule(gain,loImage);

      UVhiR = vec_mule(gainR,hiRight);
      UVloR = vec_mule(gainR,loRight);

      //now vec_mulo the Y into two vector ints
      Yhi = vec_mulo(gain,hiImage);
      Ylo = vec_mulo(gain,loImage);

      YhiR = vec_mulo(gainR,hiRight);
      YloR = vec_mulo(gainR,loRight);


      //accumulate the two weighted frames, then shift right by 8 to undo
      //the gain scaling
      //add UV
      UVhi = vec_adds(UVhi,UVhiR);
      UVlo = vec_adds(UVlo,UVloR);

      Yhi = vec_adds(Yhi,YhiR);
      Ylo = vec_adds(Ylo,YloR);

      //bitshift UV
      UVhi = vec_sra(UVhi,bitshift);
      UVlo = vec_sra(UVlo,bitshift);

      Yhi = vec_sra(Yhi,bitshift);
      Ylo = vec_sra(Ylo,bitshift);

      //pack the UV into a single short vector
      UVImage =  vec_packs(UVhi,UVlo);

      //pack the Y into a single short vector
      YImage =  vec_packs(Yhi,Ylo);

      //vec_mergel + vec_mergeh Y and UV
      hiImage =  vec_mergeh(UVImage,YImage);
      loImage =  vec_mergel(UVImage,YImage);

      //add 128 offset back
      hiImage = vec_adds(hiImage,gainSub);
      loImage = vec_adds(loImage,gainSub);

      //saturate-pack back to unsigned chars and store to both buffers
      rightData[0] = (vector unsigned char)vec_packsu(hiImage, loImage);
      inData[0] = (vector unsigned char)vec_packsu(hiImage, loImage);
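
      // Writing the blended result back to rightData as well makes the blur
      // recursive: the next frame blends against this output rather than the
      // raw previous frame, giving an exponential decay trail.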

      inData++;
      rightData++;
    }
  }
# ifndef PPC970
  //stop the cache streams
  vec_dss( 0 );
  vec_dss( 1 );
  vec_dss( 2 );
  vec_dss( 3 );
# endif


}/* end of working altivec function */
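
For reference, here is a scalar sketch of the per-byte blend the vector loop above implements (assuming UYVY frames and Q8 gains; motionblur_scalar is a hypothetical helper, not part of pix_motionblur):

/* Scalar equivalent of the AltiVec loop: blend each byte of the current
 * frame with the saved frame, un-biasing chroma around 128 first. */
static void motionblur_scalar(unsigned char *img, unsigned char *saved,
                              int n_bytes, int gain_img, int gain_saved)
{
    int i;
    for (i = 0; i < n_bytes; i++) {
        int off = (i & 1) ? 0 : 128;   /* even bytes are U/V, odd are Y */
        int v = ((gain_img   * (img[i]   - off) +
                  gain_saved * (saved[i] - off)) >> 8) + off;
        if (v < 0)   v = 0;            /* saturate like vec_packsu */
        if (v > 255) v = 255;
        img[i] = saved[i] = (unsigned char)v;
    }
}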