/*
  clear_blocks_dcbz32_ppc will not work properly on PowerPC
  processors whose cache line size is not 32 bytes.
  Fortunately, all processors used by Apple up to at least
  the 7450 (aka second-generation G4) use 32-byte cache lines.
  This is due to the use of the 'dcbz' instruction: it simply
  clears a single cache line to zero, so you need to know the
  cache line size to use it.
  It's absurd, but it's fast...

  Update 2003-06-24: Apple released the G5 yesterday, with a
  PPC970. Cache line size: 128 bytes. Oops. The semantics of
  dcbz were changed: it now always clears 32 bytes, so the
  function below will still work, but slowly. So I fixed
  check_dcbz_effect to use dcbzl, which is defined to clear a
  full cache line (as dcbz did before). That way we can still
  tell the two apart and use dcbz (32 bytes) or dcbzl (one
  cache line) as required; a sketch of such a runtime probe
  follows the function below.

  see <http://developer.apple.com/technotes/tn/tn2087.html>
  and <http://developer.apple.com/technotes/tn/tn2086.html>
*/
void clear_blocks_dcbz32_ppc(DCTELEM *blocks)
{
POWERPC_PERF_DECLARE(powerpc_clear_blocks_dcbz32, 1);
    register int misal = ((unsigned long)blocks & 0x00000010);
    register int i = 0;
POWERPC_PERF_START_COUNT(powerpc_clear_blocks_dcbz32, 1);
#if 1
    if (misal) {
      ((unsigned long*)blocks)[0] = 0L;
      ((unsigned long*)blocks)[1] = 0L;
      ((unsigned long*)blocks)[2] = 0L;
      ((unsigned long*)blocks)[3] = 0L;
      i += 16;
    }
    for ( ; i < sizeof(DCTELEM)*6*64-31 ; i += 32) {
#ifndef __MWERKS__
      asm volatile("dcbz %0,%1" : : "b" (blocks), "r" (i) : "memory");
#else
      __dcbz( blocks, i );
#endif
    }
    if (misal) {
      ((unsigned long*)blocks)[188] = 0L;
      ((unsigned long*)blocks)[189] = 0L;
      ((unsigned long*)blocks)[190] = 0L;
      ((unsigned long*)blocks)[191] = 0L;
      i += 16;
    }
#else
    memset(blocks, 0, sizeof(DCTELEM)*6*64);
#endif
POWERPC_PERF_STOP_COUNT(powerpc_clear_blocks_dcbz32, 1);
}
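/*
  The comment above mentions check_dcbz_effect, which probes at run time
  how many bytes dcbz/dcbzl actually clear. What follows is a minimal
  sketch of such a probe, not the function named in the comment: it
  assumes GCC-style inline asm and an assembler that knows dcbzl, and the
  name probe_dcbzl_size is hypothetical.
*/
#include <stdlib.h>
#include <string.h>

static int probe_dcbzl_size(void)
{
    char *buf = malloc(1024);
    register long zero = 0;
    int i, count = 0;

    if (!buf)
        return 0;
    memset(buf, 0xFF, 1024);
    /* zero the cache line containing buf + 512, then count the cleared bytes */
    __asm__ volatile("dcbzl %0, %1" : : "b" (buf + 512), "r" (zero) : "memory");
    for (i = 0; i < 1024; i++)
        count += (buf[i] == 0);
    free(buf);
    return count;   /* e.g. 32 on a G4, 128 on a PPC970; 0 means allocation failed */
}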
void idct_put_altivec(uint8_t* dest, int stride, int16_t *blk)
{
POWERPC_PERF_DECLARE(altivec_idct_put_num, 1);
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;

#if CONFIG_POWERPC_PERF
POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
#endif
    IDCT

#define COPY(dest,src)                                          \
    tmp = vec_packsu (src, src);                                \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);       \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)

POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
}
void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block)
{
POWERPC_PERF_DECLARE(altivec_idct_put_num, 1);
    vector_u8_t tmp;

#ifdef POWERPC_PERFORMANCE_REPORT
POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
#endif
    IDCT

#define COPY(dest,src)                                          \
    tmp = vec_packsu (src, src);                                \
    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);       \
    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)

POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
}
void idct_add_altivec(uint8_t* dest, int stride, int16_t *blk)
{
POWERPC_PERF_DECLARE(altivec_idct_add_num, 1);
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

#if CONFIG_POWERPC_PERF
POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
#endif

    IDCT

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

#define ADD(dest,src,perm)                                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm);       \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);               \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    ADD (dest, vx0, perm0)      dest += stride;
    ADD (dest, vx1, perm1)      dest += stride;
    ADD (dest, vx2, perm0)      dest += stride;
    ADD (dest, vx3, perm1)      dest += stride;
    ADD (dest, vx4, perm0)      dest += stride;
    ADD (dest, vx5, perm1)      dest += stride;
    ADD (dest, vx6, perm0)      dest += stride;
    ADD (dest, vx7, perm1)

POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
}
void clear_blocks_dcbz128_ppc(DCTELEM *blocks)
{
POWERPC_PERF_DECLARE(powerpc_clear_blocks_dcbz128, 1);
    register int misal = ((unsigned long)blocks & 0x0000007f);
    register int i = 0;
POWERPC_PERF_START_COUNT(powerpc_clear_blocks_dcbz128, 1);
#if 1
 if (misal) {
   // we could probably also optimize this case,
   // but there's not much point as the machines
   // aren't available yet (2003-06-26)
      memset(blocks, 0, sizeof(DCTELEM)*6*64);
    }
    else
      for ( ; i < sizeof(DCTELEM)*6*64 ; i += 128) {
        asm volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory");
      }
#else
    memset(blocks, 0, sizeof(DCTELEM)*6*64);
#endif
POWERPC_PERF_STOP_COUNT(powerpc_clear_blocks_dcbz128, 1);
}
/* this code assumes that stride % 16 == 0 */
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                          ((x) * (8 - y)),
                          ((8 - x) * (y)),
                          ((x) * (y))};
    register int i;
    vec_u8_t fperm;
    const vec_s32_t vABCD = vec_ld(0, ABCD);
    const vec_s16_t vA = vec_splat((vec_s16_t)vABCD, 1);
    const vec_s16_t vB = vec_splat((vec_s16_t)vABCD, 3);
    const vec_s16_t vC = vec_splat((vec_s16_t)vABCD, 5);
    const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
    LOAD_ZERO;
    const vec_s16_t v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16_t v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8_t vsrc0uc, vsrc1uc;
    vec_s16_t vsrc0ssH, vsrc1ssH;
    vec_u8_t vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16_t vsrc2ssH, vsrc3ssH, psum;
    vec_u8_t vdst, ppsum, vfdst, fsum;

  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);

    if (((unsigned long)dst) % 16 == 0) {
      fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
                            0x14, 0x15, 0x16, 0x17,
                            0x08, 0x09, 0x0A, 0x0B,
                            0x0C, 0x0D, 0x0E, 0x0F);
    } else {
      fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
                            0x04, 0x05, 0x06, 0x07,
                            0x18, 0x19, 0x1A, 0x1B,
                            0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
      vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
      vsrc1uc = vsrcBuc;
    else
      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc0uc);
    vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
      for (i = 0 ; i < h ; i++) {


        vsrcCuc = vec_ld(stride + 0, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc2uc);
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sra(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vec_u8_t)vec_packsu(psum, psum);
        vfdst = vec_perm(vdst, ppsum, fperm);

        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    } else {
        vec_u8_t vsrcDuc;
      for (i = 0 ; i < h ; i++) {
        vsrcCuc = vec_ld(stride + 0, src);
        vsrcDuc = vec_ld(stride + 16, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
        if (reallyBadAlign)
          vsrc3uc = vsrcDuc;
        else
          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc2uc);
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sr(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vec_u8_t)vec_pack(psum, psum);
        vfdst = vec_perm(vdst, ppsum, fperm);

        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
}
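/*
  For reference, the weights vA..vD, the +32 bias (v32ss) and the shift by
  six (v6us) above implement the standard H.264 chroma interpolation
  dst[x] = (A*s[x] + B*s[x+1] + C*s[x+stride] + D*s[x+stride+1] + 32) >> 6.
  Below is a scalar sketch of the "put" variant (where OP_U8_ALTIVEC
  reduces to a plain assignment); the helper name is hypothetical. Since
  A+B+C+D == 64, the result already fits in 8 bits and needs no clipping.
*/
#include <stdint.h>

static void chroma_mc8_put_ref(uint8_t *dst, const uint8_t *src,
                               int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 32) >> 6;
        dst += stride;
        src += stride;
    }
}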
void fdct_altivec(int16_t *block)
{
POWERPC_PERF_DECLARE(altivec_fdct, 1);
    vector signed short *bp;
    vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;

    POWERPC_PERF_START_COUNT(altivec_fdct, 1);


    /* setup constants {{{ */
    /* mzero = -0.0 */
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */


    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))

    bp = (vector signed short*)block;
    b00 = ((vector float)vec_ld(0,    bp));
    b40 = ((vector float)vec_ld(16*4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0,    bp));
    b50 = ((vector float)vec_ld(16*4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0,    bp));
    b60 = ((vector float)vec_ld(16*4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0,    bp));
    b70 = ((vector float)vec_ld(16*4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));

#undef MERGE_S16
    /* }}} */


/* Some of the initial calculations can be done as vector short before
 * conversion to vector float.  The following code section takes advantage
 * of this.
 */
#if 1
    /* fdct rows {{{ */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));

#define CTF0(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);

#define CTFX(x,b) \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0); \

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX


    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);


    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */
#else
    /* convert to float {{{ */
#define CTF(n) \
    vs32(b##n##1) = vec_unpackl(vs16(b##n##0)); \
    vs32(b##n##0) = vec_unpackh(vs16(b##n##0)); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0); \

    CTF(0);
    CTF(1);
    CTF(2);
    CTF(3);
    CTF(4);
    CTF(5);
    CTF(6);
    CTF(7);

#undef CTF
    /* }}} */

    FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71);
#endif


    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */


    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);


    /* round, convert back to short {{{ */
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp);

    bp = (vector signed short*)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */

POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
}
/* this code assumes stride % 16 == 0 */
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
  register int i;
  
  const vector signed int vzero = vec_splat_s32(0);
  const vector unsigned char permM2 = vec_lvsl(-2, src);
  const vector unsigned char permM1 = vec_lvsl(-1, src);
  const vector unsigned char permP0 = vec_lvsl(+0, src);
  const vector unsigned char permP1 = vec_lvsl(+1, src);
  const vector unsigned char permP2 = vec_lvsl(+2, src);
  const vector unsigned char permP3 = vec_lvsl(+3, src);
  const vector signed short v20ss = (const vector signed short)AVV(20);
  const vector unsigned short v5us = vec_splat_u16(5);
  const vector signed short v5ss = vec_splat_s16(5);
  const vector signed short v16ss = (const vector signed short)AVV(16);
  const vector unsigned char dstperm = vec_lvsr(0, dst);
  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);

  register int align = ((((unsigned long)src) - 2) % 16);

  for (i = 0 ; i < 16 ; i ++) {
    vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
    vector unsigned char srcR1 = vec_ld(-2, src);
    vector unsigned char srcR2 = vec_ld(14, src);

    switch (align) {
    default: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = vec_perm(srcR1, srcR2, permP3);
    } break;
    case 11: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = srcR2;
    } break;
    case 12: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = srcR2;
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 13: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = srcR2;
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 14: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = srcR2;
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 15: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = srcR2;
      srcP0 = vec_perm(srcR2, srcR3, permP0);
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    }

    const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
    const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
    const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
    const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);

    const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
    const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
    const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
    const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);

    const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
    const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
    const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
    const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);

    const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
    const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
    const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
    const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
    const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
    const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
    
    const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
    const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);

    const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
    const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
    
    const vector signed short pp3A = vec_add(sum3A, pp1A);
    const vector signed short pp3B = vec_add(sum3B, pp1B);

    const vector signed short psumA = vec_sub(pp3A, pp2A);
    const vector signed short psumB = vec_sub(pp3B, pp2B);

    const vector signed short sumA = vec_sra(psumA, v5us);
    const vector signed short sumB = vec_sra(psumB, v5us);

    const vector unsigned char sum = vec_packsu(sumA, sumB);

    const vector unsigned char dst1 = vec_ld(0, dst);
    const vector unsigned char dst2 = vec_ld(16, dst);
    const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));

    vector unsigned char fsum;
    OP_U8_ALTIVEC(fsum, sum, vdst);

    const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
    const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
    const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);

    vec_st(fdst1, 0, dst);
    vec_st(fdst2, 16, dst);

    src += srcStride;
    dst += dstStride;
  }
POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
}
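/*
  The constants above (v20ss, v5ss, v16ss and the shift by 5 via v5us)
  implement the H.264 6-tap luma filter (1, -5, 20, 20, -5, 1) with +16
  rounding, saturated to 8 bits by vec_packsu. Below is a scalar sketch of
  the same horizontal pass in its "put" form; the names are hypothetical.
*/
#include <stdint.h>

static inline uint8_t clip_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

static void qpel16_h_lowpass_ref(uint8_t *dst, const uint8_t *src,
                                 int dstStride, int srcStride)
{
    int i, j;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++) {
            int v = src[j - 2] - 5 * src[j - 1] + 20 * src[j] +
                    20 * src[j + 1] - 5 * src[j + 2] + src[j + 3];
            dst[j] = clip_u8((v + 16) >> 5);
        }
        src += srcStride;
        dst += dstStride;
    }
}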
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                    int stride, int h, int x, int y) {
  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
    LOAD_ZERO;
    const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16 v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;

  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);

    if (ABCD[3]) {
        if (!loadSecond) {// -> !reallyBadAlign
            for (i = 0 ; i < h ; i++) {
                vsrcCuc = vec_ld(stride + 0, src);
                vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
                vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

                CHROMA_MC8_ALTIVEC_CORE
            }
        } else {
            vec_u8 vsrcDuc;
            for (i = 0 ; i < h ; i++) {
                vsrcCuc = vec_ld(stride + 0, src);
                vsrcDuc = vec_ld(stride + 16, src);
                vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
                if (reallyBadAlign)
                    vsrc3uc = vsrcDuc;
                else
                    vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

                CHROMA_MC8_ALTIVEC_CORE
            }
        }
    } else {
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
    const DECLARE_ALIGNED_16(unsigned short, rounder_a[8]) =
      {rounder, rounder, rounder, rounder,
       rounder, rounder, rounder, rounder};
    const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) =
      {
        (16-x16)*(16-y16), /* A */
        (   x16)*(16-y16), /* B */
        (16-x16)*(   y16), /* C */
        (   x16)*(   y16), /* D */
        0, 0, 0, 0         /* padding */
      };
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vcsr8 = (const_vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;


POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_ld(0, (unsigned short*)rounder_a);

    // We will be able to pick up our 9 char elements at src
    // from those 32 bytes. We load the first batch here, as
    // inside the loop we can reuse 'src + stride' from one
    // iteration as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
    { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
      srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    }
    else
    {
      srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for(i=0; i<h; i++)
    {
      dst_odd = (unsigned long)dst & 0x0000000F;
      src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

      dstv = vec_ld(0, dst);

      // We will be able to pick up our 9 char elements at
      // src + stride from those 32 bytes, then reuse the
      // resulting two vectors srcvC and srcvD as the next
      // srcvA and srcvB.
      src_0 = vec_ld(stride + 0, src);
      src_1 = vec_ld(stride + 16, src);
      srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

      if (src_really_odd != 0x0000000F)
      { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
        srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
      }
      else
      {
        srcvD = src_1;
      }

      srcvC = vec_mergeh(vczero, srcvC);
      srcvD = vec_mergeh(vczero, srcvD);


      // OK, now we (finally) do the math :-)
      // These four instructions replace 32 integer multiplies and 32 integer adds.
      // Isn't AltiVec nice?
      tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
      tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
      tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
      tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

      srcvA = srcvC;
      srcvB = srcvD;

      tempD = vec_sr(tempD, vcsr8);

      dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

      if (dst_odd)
      {
        dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
      }
      else
      {
        dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
      }

      vec_st(dstv2, 0, dst);

      dst += stride;
      src += stride;
    }

POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}
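/*
  Per pixel, the four vec_mladd calls above compute the same weighted sum
  a scalar GMC1 routine would: the 1/16-pel bilinear weights from the
  ABCD[] table, plus 'rounder', shifted right by 8 (vcsr8). Below is a
  scalar sketch over the 8-pixel-wide block; the helper name is
  hypothetical. Since A+B+C+D == 256, the result fits in 8 bits without
  clipping.
*/
#include <stdint.h>

static void gmc1_ref(uint8_t *dst, const uint8_t *src, int stride,
                     int h, int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (     x16) * (16 - y16);
    const int C = (16 - x16) * (     y16);
    const int D = (     x16) * (     y16);
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      rounder) >> 8;
        dst += stride;
        src += stride;
    }
}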
/* this code assumes stride % 16 == 0 */
static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);

  register int i;

  LOAD_ZERO;
  const vec_u8_t perm = vec_lvsl(0, src);
  const vec_s16_t v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
  const vec_u16_t v5us = vec_splat_u16(5);
  const vec_s16_t v5ss = vec_splat_s16(5);
  const vec_s16_t v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

  uint8_t *srcbis = src - (srcStride * 2);

  const vec_u8_t srcM2a = vec_ld(0, srcbis);
  const vec_u8_t srcM2b = vec_ld(16, srcbis);
  const vec_u8_t srcM2 = vec_perm(srcM2a, srcM2b, perm);
//  srcbis += srcStride;
  const vec_u8_t srcM1a = vec_ld(0, srcbis += srcStride);
  const vec_u8_t srcM1b = vec_ld(16, srcbis);
  const vec_u8_t srcM1 = vec_perm(srcM1a, srcM1b, perm);
//  srcbis += srcStride;
  const vec_u8_t srcP0a = vec_ld(0, srcbis += srcStride);
  const vec_u8_t srcP0b = vec_ld(16, srcbis);
  const vec_u8_t srcP0 = vec_perm(srcP0a, srcP0b, perm);
//  srcbis += srcStride;
  const vec_u8_t srcP1a = vec_ld(0, srcbis += srcStride);
  const vec_u8_t srcP1b = vec_ld(16, srcbis);
  const vec_u8_t srcP1 = vec_perm(srcP1a, srcP1b, perm);
//  srcbis += srcStride;
  const vec_u8_t srcP2a = vec_ld(0, srcbis += srcStride);
  const vec_u8_t srcP2b = vec_ld(16, srcbis);
  const vec_u8_t srcP2 = vec_perm(srcP2a, srcP2b, perm);
//  srcbis += srcStride;

  vec_s16_t srcM2ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcM2);
  vec_s16_t srcM2ssB = (vec_s16_t) vec_mergel(zero_u8v, srcM2);
  vec_s16_t srcM1ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcM1);
  vec_s16_t srcM1ssB = (vec_s16_t) vec_mergel(zero_u8v, srcM1);
  vec_s16_t srcP0ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcP0);
  vec_s16_t srcP0ssB = (vec_s16_t) vec_mergel(zero_u8v, srcP0);
  vec_s16_t srcP1ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcP1);
  vec_s16_t srcP1ssB = (vec_s16_t) vec_mergel(zero_u8v, srcP1);
  vec_s16_t srcP2ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcP2);
  vec_s16_t srcP2ssB = (vec_s16_t) vec_mergel(zero_u8v, srcP2);

  vec_s16_t pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
                      psumA, psumB, sumA, sumB,
                      srcP3ssA, srcP3ssB,
                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;

  vec_u8_t sum, vdst, fsum, srcP3a, srcP3b, srcP3;

  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);

  for (i = 0 ; i < 16 ; i++) {
    srcP3a = vec_ld(0, srcbis += srcStride);
    srcP3b = vec_ld(16, srcbis);
    srcP3 = vec_perm(srcP3a, srcP3b, perm);
    srcP3ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcP3);
    srcP3ssB = (vec_s16_t) vec_mergel(zero_u8v, srcP3);
//    srcbis += srcStride;

    sum1A = vec_adds(srcP0ssA, srcP1ssA);
    sum1B = vec_adds(srcP0ssB, srcP1ssB);
    sum2A = vec_adds(srcM1ssA, srcP2ssA);
    sum2B = vec_adds(srcM1ssB, srcP2ssB);
    sum3A = vec_adds(srcM2ssA, srcP3ssA);
    sum3B = vec_adds(srcM2ssB, srcP3ssB);

    srcM2ssA = srcM1ssA;
    srcM2ssB = srcM1ssB;
    srcM1ssA = srcP0ssA;
    srcM1ssB = srcP0ssB;
    srcP0ssA = srcP1ssA;
    srcP0ssB = srcP1ssB;
    srcP1ssA = srcP2ssA;
    srcP1ssB = srcP2ssB;
    srcP2ssA = srcP3ssA;
    srcP2ssB = srcP3ssB;

    pp1A = vec_mladd(sum1A, v20ss, v16ss);
    pp1B = vec_mladd(sum1B, v20ss, v16ss);

    pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
    pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

    pp3A = vec_add(sum3A, pp1A);
    pp3B = vec_add(sum3B, pp1B);

    psumA = vec_sub(pp3A, pp2A);
    psumB = vec_sub(pp3B, pp2B);

    sumA = vec_sra(psumA, v5us);
    sumB = vec_sra(psumB, v5us);

    sum = vec_packsu(sumA, sumB);

    ASSERT_ALIGNED(dst);
    vdst = vec_ld(0, dst);

    OP_U8_ALTIVEC(fsum, sum, vdst);

    vec_st(fsum, 0, dst);

    dst += dstStride;
  }
  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
}
/* this code assumes stride % 16 == 0 *and* tmp is properly aligned */
static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
  register int i;
  LOAD_ZERO;
  const vec_u8_t permM2 = vec_lvsl(-2, src);
  const vec_u8_t permM1 = vec_lvsl(-1, src);
  const vec_u8_t permP0 = vec_lvsl(+0, src);
  const vec_u8_t permP1 = vec_lvsl(+1, src);
  const vec_u8_t permP2 = vec_lvsl(+2, src);
  const vec_u8_t permP3 = vec_lvsl(+3, src);
  const vec_s16_t v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
  const vec_u32_t v10ui = vec_splat_u32(10);
  const vec_s16_t v5ss = vec_splat_s16(5);
  const vec_s16_t v1ss = vec_splat_s16(1);
  const vec_s32_t v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
  const vec_u32_t v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));

  register int align = ((((unsigned long)src) - 2) % 16);

  vec_s16_t srcP0A, srcP0B, srcP1A, srcP1B,
                      srcP2A, srcP2B, srcP3A, srcP3B,
                      srcM1A, srcM1B, srcM2A, srcM2B,
                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
                      pp1A, pp1B, pp2A, pp2B, psumA, psumB;

  const vec_u8_t mperm = (const vec_u8_t)
    AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
        0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
  int16_t *tmpbis = tmp;

  vec_s16_t tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
                      tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
                      tmpP2ssA, tmpP2ssB;

  vec_s32_t pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
                    pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
                    pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
                    ssumAe, ssumAo, ssumBe, ssumBo;
  vec_u8_t fsum, sumv, sum, vdst;
  vec_s16_t ssume, ssumo;

  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
  src -= (2 * srcStride);
  for (i = 0 ; i < 21 ; i ++) {
    vec_u8_t srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
    vec_u8_t srcR1 = vec_ld(-2, src);
    vec_u8_t srcR2 = vec_ld(14, src);

    switch (align) {
    default: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = vec_perm(srcR1, srcR2, permP3);
    } break;
    case 11: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = srcR2;
    } break;
    case 12: {
      vec_u8_t srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = srcR2;
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 13: {
      vec_u8_t srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = srcR2;
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 14: {
      vec_u8_t srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = srcR2;
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 15: {
      vec_u8_t srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = srcR2;
      srcP0 = vec_perm(srcR2, srcR3, permP0);
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    }

    srcP0A = (vec_s16_t) vec_mergeh(zero_u8v, srcP0);
    srcP0B = (vec_s16_t) vec_mergel(zero_u8v, srcP0);
    srcP1A = (vec_s16_t) vec_mergeh(zero_u8v, srcP1);
    srcP1B = (vec_s16_t) vec_mergel(zero_u8v, srcP1);

    srcP2A = (vec_s16_t) vec_mergeh(zero_u8v, srcP2);
    srcP2B = (vec_s16_t) vec_mergel(zero_u8v, srcP2);
    srcP3A = (vec_s16_t) vec_mergeh(zero_u8v, srcP3);
    srcP3B = (vec_s16_t) vec_mergel(zero_u8v, srcP3);

    srcM1A = (vec_s16_t) vec_mergeh(zero_u8v, srcM1);
    srcM1B = (vec_s16_t) vec_mergel(zero_u8v, srcM1);
    srcM2A = (vec_s16_t) vec_mergeh(zero_u8v, srcM2);
    srcM2B = (vec_s16_t) vec_mergel(zero_u8v, srcM2);

    sum1A = vec_adds(srcP0A, srcP1A);
    sum1B = vec_adds(srcP0B, srcP1B);
    sum2A = vec_adds(srcM1A, srcP2A);
    sum2B = vec_adds(srcM1B, srcP2B);
    sum3A = vec_adds(srcM2A, srcP3A);
    sum3B = vec_adds(srcM2B, srcP3B);

    pp1A = vec_mladd(sum1A, v20ss, sum3A);
    pp1B = vec_mladd(sum1B, v20ss, sum3B);

    pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
    pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

    psumA = vec_sub(pp1A, pp2A);
    psumB = vec_sub(pp1B, pp2B);

    vec_st(psumA, 0, tmp);
    vec_st(psumB, 16, tmp);

    src += srcStride;
    tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
  }

  tmpM2ssA = vec_ld(0, tmpbis);
  tmpM2ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  tmpM1ssA = vec_ld(0, tmpbis);
  tmpM1ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  tmpP0ssA = vec_ld(0, tmpbis);
  tmpP0ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  tmpP1ssA = vec_ld(0, tmpbis);
  tmpP1ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  tmpP2ssA = vec_ld(0, tmpbis);
  tmpP2ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;

  for (i = 0 ; i < 16 ; i++) {
    const vec_s16_t tmpP3ssA = vec_ld(0, tmpbis);
    const vec_s16_t tmpP3ssB = vec_ld(16, tmpbis);

    const vec_s16_t sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
    const vec_s16_t sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
    const vec_s16_t sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
    const vec_s16_t sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
    const vec_s16_t sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
    const vec_s16_t sum3B = vec_adds(tmpM2ssB, tmpP3ssB);

    tmpbis += tmpStride;

    tmpM2ssA = tmpM1ssA;
    tmpM2ssB = tmpM1ssB;
    tmpM1ssA = tmpP0ssA;
    tmpM1ssB = tmpP0ssB;
    tmpP0ssA = tmpP1ssA;
    tmpP0ssB = tmpP1ssB;
    tmpP1ssA = tmpP2ssA;
    tmpP1ssB = tmpP2ssB;
    tmpP2ssA = tmpP3ssA;
    tmpP2ssB = tmpP3ssB;

    pp1Ae = vec_mule(sum1A, v20ss);
    pp1Ao = vec_mulo(sum1A, v20ss);
    pp1Be = vec_mule(sum1B, v20ss);
    pp1Bo = vec_mulo(sum1B, v20ss);

    pp2Ae = vec_mule(sum2A, v5ss);
    pp2Ao = vec_mulo(sum2A, v5ss);
    pp2Be = vec_mule(sum2B, v5ss);
    pp2Bo = vec_mulo(sum2B, v5ss);

    pp3Ae = vec_sra((vec_s32_t)sum3A, v16ui);
    pp3Ao = vec_mulo(sum3A, v1ss);
    pp3Be = vec_sra((vec_s32_t)sum3B, v16ui);
    pp3Bo = vec_mulo(sum3B, v1ss);

    pp1cAe = vec_add(pp1Ae, v512si);
    pp1cAo = vec_add(pp1Ao, v512si);
    pp1cBe = vec_add(pp1Be, v512si);
    pp1cBo = vec_add(pp1Bo, v512si);

    pp32Ae = vec_sub(pp3Ae, pp2Ae);
    pp32Ao = vec_sub(pp3Ao, pp2Ao);
    pp32Be = vec_sub(pp3Be, pp2Be);
    pp32Bo = vec_sub(pp3Bo, pp2Bo);

    sumAe = vec_add(pp1cAe, pp32Ae);
    sumAo = vec_add(pp1cAo, pp32Ao);
    sumBe = vec_add(pp1cBe, pp32Be);
    sumBo = vec_add(pp1cBo, pp32Bo);

    ssumAe = vec_sra(sumAe, v10ui);
    ssumAo = vec_sra(sumAo, v10ui);
    ssumBe = vec_sra(sumBe, v10ui);
    ssumBo = vec_sra(sumBo, v10ui);

    ssume = vec_packs(ssumAe, ssumBe);
    ssumo = vec_packs(ssumAo, ssumBo);

    sumv = vec_packsu(ssume, ssumo);
    sum = vec_perm(sumv, sumv, mperm);

    ASSERT_ALIGNED(dst);
    vdst = vec_ld(0, dst);

    OP_U8_ALTIVEC(fsum, sum, vdst);

    vec_st(fsum, 0, dst);

    dst += dstStride;
  }
  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
}
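/*
  The first loop above stores the horizontally filtered rows into tmp as
  un-rounded 16-bit values; the second applies the same 6-tap kernel
  vertically, adds 512 (v512si) and shifts right by 10 (v10ui) before
  saturating to 8 bits. Below is a scalar sketch of that two-stage "put"
  filter; the names are hypothetical, and clip_u8 is the same helper as
  in the earlier horizontal-pass sketch.
*/
#include <stdint.h>

static inline uint8_t clip_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

static void qpel16_hv_lowpass_ref(uint8_t *dst, int16_t *tmp, const uint8_t *src,
                                  int dstStride, int tmpStride, int srcStride)
{
    int i, j;

    src -= 2 * srcStride;                /* two rows above the block are needed */
    for (i = 0; i < 21; i++) {           /* 16 output rows + 5 extra for the vertical taps */
        for (j = 0; j < 16; j++)
            tmp[j] = src[j - 2] - 5 * src[j - 1] + 20 * src[j] +
                     20 * src[j + 1] - 5 * src[j + 2] + src[j + 3];
        src += srcStride;
        tmp += tmpStride;
    }

    tmp -= 19 * tmpStride;               /* back to the row holding the first output's center tap */
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++) {
            int v = tmp[j - 2 * tmpStride] - 5 * tmp[j - tmpStride] +
                    20 * tmp[j]            + 20 * tmp[j + tmpStride] -
                    5 * tmp[j + 2 * tmpStride] + tmp[j + 3 * tmpStride];
            dst[j] = clip_u8((v + 512) >> 10);
        }
        tmp += tmpStride;
        dst += dstStride;
    }
}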
/*
  AltiVec version of dct_unquantize_h263
  this code assumes `block' is 16-byte aligned
*/
void dct_unquantize_h263_altivec(MpegEncContext *s, 
                                 DCTELEM *block, int n, int qscale)
{
POWERPC_PERF_DECLARE(altivec_dct_unquantize_h263_num, 1);
    int i, level, qmul, qadd;
    int nCoeffs;
    
    assert(s->block_last_index[n]>=0);

POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1);
    
    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;
    
    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4) 
                block[0] = block[0] * s->y_dc_scale;
            else
                block[0] = block[0] * s->c_dc_scale;
        }else
            qadd = 0;
        i = 1;
        nCoeffs= 63; //does not always use the zigzag table
    } else {
        i = 0;
        nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
    }

#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    for(;i<=nCoeffs;i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    {
      register const vector short vczero = (const vector short)vec_splat_s16(0);
      short __attribute__ ((aligned(16))) qmul8[] =
          {
            qmul, qmul, qmul, qmul,
            qmul, qmul, qmul, qmul
          };
      short __attribute__ ((aligned(16))) qadd8[] =
          {
            qadd, qadd, qadd, qadd,
            qadd, qadd, qadd, qadd
          };
      short __attribute__ ((aligned(16))) nqadd8[] =
          {
            -qadd, -qadd, -qadd, -qadd,
            -qadd, -qadd, -qadd, -qadd
          };
      register vector short blockv, qmulv, qaddv, nqaddv, temp1;
      register vector bool short blockv_null, blockv_neg;
      register short backup_0 = block[0];
      register int j = 0;
      
      qmulv = vec_ld(0, qmul8);
      qaddv = vec_ld(0, qadd8);
      nqaddv = vec_ld(0, nqadd8);

#if 0 // block *is* 16-byte aligned, it seems.
      // first make sure block[j] is 16-byte aligned
      for(j = 0; (j <= nCoeffs) && ((((unsigned long)block) + (j << 1)) & 0x0000000F) ; j++) {
        level = block[j];
        if (level) {
          if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[j] = level;
        }
      }
#endif
      
      // vectorize all the 16-byte-aligned blocks of 8 elements
      for(; (j + 7) <= nCoeffs ; j+=8)
      {
        blockv = vec_ld(j << 1, block);
        blockv_neg = vec_cmplt(blockv, vczero);
        blockv_null = vec_cmpeq(blockv, vczero);
        // choose between +qadd or -qadd as the third operand
        temp1 = vec_sel(qaddv, nqaddv, blockv_neg);
        // multiply & add (block{i,i+7} * qmul [+-] qadd)
        temp1 = vec_mladd(blockv, qmulv, temp1);
        // put 0 where block{i,i+7} used to have 0
        blockv = vec_sel(temp1, blockv, blockv_null);
        vec_st(blockv, j << 1, block);
      }

      // if nCoeffs isn't a multiple of 8, finish the job
      // using good old scalar units.
      // (we could do it using a truncated vector,
      // but I'm not sure it's worth the hassle)
      for(; j <= nCoeffs ; j++) {
        level = block[j];
        if (level) {
          if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[j] = level;
        }
      }
      
      if (i == 1)
      { // cheat: this avoids special-casing the first iteration
        block[0] = backup_0;
      }
    }
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */

POWERPC_PERF_STOP_COUNT(altivec_dct_unquantize_h263_num, nCoeffs == 63);
}
/* this code assumes stride % 16 == 0 */
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
  register int i;

  LOAD_ZERO;
  const vec_u8_t permM2 = vec_lvsl(-2, src);
  const vec_u8_t permM1 = vec_lvsl(-1, src);
  const vec_u8_t permP0 = vec_lvsl(+0, src);
  const vec_u8_t permP1 = vec_lvsl(+1, src);
  const vec_u8_t permP2 = vec_lvsl(+2, src);
  const vec_u8_t permP3 = vec_lvsl(+3, src);
  const vec_s16_t v5ss = vec_splat_s16(5);
  const vec_u16_t v5us = vec_splat_u16(5);
  const vec_s16_t v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
  const vec_s16_t v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

  vec_u8_t srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;

  register int align = ((((unsigned long)src) - 2) % 16);

  vec_s16_t srcP0A, srcP0B, srcP1A, srcP1B,
                      srcP2A, srcP2B, srcP3A, srcP3B,
                      srcM1A, srcM1B, srcM2A, srcM2B,
                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
                      pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
                      psumA, psumB, sumA, sumB;

  vec_u8_t sum, vdst, fsum;

  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);

  for (i = 0 ; i < 16 ; i ++) {
    vec_u8_t srcR1 = vec_ld(-2, src);
    vec_u8_t srcR2 = vec_ld(14, src);

    switch (align) {
    default: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = vec_perm(srcR1, srcR2, permP3);
    } break;
    case 11: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = srcR2;
    } break;
    case 12: {
      vec_u8_t srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = srcR2;
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 13: {
      vec_u8_t srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = srcR2;
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 14: {
      vec_u8_t srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = srcR2;
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 15: {
      vec_u8_t srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = srcR2;
      srcP0 = vec_perm(srcR2, srcR3, permP0);
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    }

    srcP0A = (vec_s16_t) vec_mergeh(zero_u8v, srcP0);
    srcP0B = (vec_s16_t) vec_mergel(zero_u8v, srcP0);
    srcP1A = (vec_s16_t) vec_mergeh(zero_u8v, srcP1);
    srcP1B = (vec_s16_t) vec_mergel(zero_u8v, srcP1);

    srcP2A = (vec_s16_t) vec_mergeh(zero_u8v, srcP2);
    srcP2B = (vec_s16_t) vec_mergel(zero_u8v, srcP2);
    srcP3A = (vec_s16_t) vec_mergeh(zero_u8v, srcP3);
    srcP3B = (vec_s16_t) vec_mergel(zero_u8v, srcP3);

    srcM1A = (vec_s16_t) vec_mergeh(zero_u8v, srcM1);
    srcM1B = (vec_s16_t) vec_mergel(zero_u8v, srcM1);
    srcM2A = (vec_s16_t) vec_mergeh(zero_u8v, srcM2);
    srcM2B = (vec_s16_t) vec_mergel(zero_u8v, srcM2);

    sum1A = vec_adds(srcP0A, srcP1A);
    sum1B = vec_adds(srcP0B, srcP1B);
    sum2A = vec_adds(srcM1A, srcP2A);
    sum2B = vec_adds(srcM1B, srcP2B);
    sum3A = vec_adds(srcM2A, srcP3A);
    sum3B = vec_adds(srcM2B, srcP3B);

    pp1A = vec_mladd(sum1A, v20ss, v16ss);
    pp1B = vec_mladd(sum1B, v20ss, v16ss);

    pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
    pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

    pp3A = vec_add(sum3A, pp1A);
    pp3B = vec_add(sum3B, pp1B);

    psumA = vec_sub(pp3A, pp2A);
    psumB = vec_sub(pp3B, pp2B);

    sumA = vec_sra(psumA, v5us);
    sumB = vec_sra(psumB, v5us);

    sum = vec_packsu(sumA, sumB);

    ASSERT_ALIGNED(dst);
    vdst = vec_ld(0, dst);

    OP_U8_ALTIVEC(fsum, sum, vdst);

    vec_st(fsum, 0, dst);

    src += srcStride;
    dst += dstStride;
  }
POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
}
/* this code assumes stride % 16 == 0 *and* tmp is properly aligned */
static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
  register int i;
  const vector signed int vzero = vec_splat_s32(0);
  const vector unsigned char permM2 = vec_lvsl(-2, src);
  const vector unsigned char permM1 = vec_lvsl(-1, src);
  const vector unsigned char permP0 = vec_lvsl(+0, src);
  const vector unsigned char permP1 = vec_lvsl(+1, src);
  const vector unsigned char permP2 = vec_lvsl(+2, src);
  const vector unsigned char permP3 = vec_lvsl(+3, src);
  const vector signed short v20ss = (const vector signed short)AVV(20);
  const vector unsigned int v10ui = vec_splat_u32(10);
  const vector signed short v5ss = vec_splat_s16(5);
  const vector signed short v1ss = vec_splat_s16(1);
  const vector signed int v512si = (const vector signed int)AVV(512);
  const vector unsigned int v16ui = (const vector unsigned int)AVV(16);

  register int align = ((((unsigned long)src) - 2) % 16);

  src -= (2 * srcStride);

  for (i = 0 ; i < 21 ; i ++) {
    vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
    vector unsigned char srcR1 = vec_ld(-2, src);
    vector unsigned char srcR2 = vec_ld(14, src);

    switch (align) {
    default: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = vec_perm(srcR1, srcR2, permP3);
    } break;
    case 11: {
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = vec_perm(srcR1, srcR2, permP2);
      srcP3 = srcR2;
    } break;
    case 12: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = vec_perm(srcR1, srcR2, permP1);
      srcP2 = srcR2;
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 13: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = vec_perm(srcR1, srcR2, permP0);
      srcP1 = srcR2;
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 14: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = vec_perm(srcR1, srcR2, permM1);
      srcP0 = srcR2;
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    case 15: {
      vector unsigned char srcR3 = vec_ld(30, src);
      srcM2 = vec_perm(srcR1, srcR2, permM2);
      srcM1 = srcR2;
      srcP0 = vec_perm(srcR2, srcR3, permP0);
      srcP1 = vec_perm(srcR2, srcR3, permP1);
      srcP2 = vec_perm(srcR2, srcR3, permP2);
      srcP3 = vec_perm(srcR2, srcR3, permP3);
    } break;
    }

    const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
    const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
    const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
    const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);

    const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
    const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
    const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
    const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);

    const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
    const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
    const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
    const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);

    const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
    const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
    const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
    const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
    const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
    const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
    
    const vector signed short pp1A = vec_mladd(sum1A, v20ss, sum3A);
    const vector signed short pp1B = vec_mladd(sum1B, v20ss, sum3B);

    const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
    const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);

    const vector signed short psumA = vec_sub(pp1A, pp2A);
    const vector signed short psumB = vec_sub(pp1B, pp2B);
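    /* psum = 20*(p0+p1) - 5*(m1+p2) + (m2+p3): the raw horizontal 6-tap result,
       kept at 16-bit precision; rounding and the final shift are deferred to the
       vertical pass below. */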

    vec_st(psumA, 0, tmp);
    vec_st(psumB, 16, tmp);
    
    src += srcStride;
    tmp += tmpStride; /* tmp is an int16_t*, and tmpStride is 16 elements (32 bytes), matching the two vector stores above */
  }
  
  const vector unsigned char dstperm = vec_lvsr(0, dst);
  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
  const vector unsigned char mperm = (const vector unsigned char)
    AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
        0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
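  /* dstperm/dstmask implement the unaligned store below: the result is rotated with
     vec_perm and merged into the existing dst data with vec_sel.  mperm re-interleaves
     the even/odd lanes produced by vec_mule/vec_mulo (packed as evenA|evenB|oddA|oddB)
     back into pixel order. */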
  
  int16_t *tmpbis = tmp - (tmpStride * 21);
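  /* The horizontal pass above produced 21 rows of intermediates (16 output rows plus
     5 extra for the vertical 6-tap); rewind to the first of them. */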

  vector signed short tmpM2ssA = vec_ld(0, tmpbis);
  vector signed short tmpM2ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  vector signed short tmpM1ssA = vec_ld(0, tmpbis);
  vector signed short tmpM1ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  vector signed short tmpP0ssA = vec_ld(0, tmpbis);
  vector signed short tmpP0ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  vector signed short tmpP1ssA = vec_ld(0, tmpbis);
  vector signed short tmpP1ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;
  vector signed short tmpP2ssA = vec_ld(0, tmpbis);
  vector signed short tmpP2ssB = vec_ld(16, tmpbis);
  tmpbis += tmpStride;

  for (i = 0 ; i < 16 ; i++) {
    const vector signed short tmpP3ssA = vec_ld(0, tmpbis);
    const vector signed short tmpP3ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;

    const vector signed short sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
    const vector signed short sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
    const vector signed short sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
    const vector signed short sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
    const vector signed short sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
    const vector signed short sum3B = vec_adds(tmpM2ssB, tmpP3ssB);

    tmpM2ssA = tmpM1ssA;
    tmpM2ssB = tmpM1ssB;
    tmpM1ssA = tmpP0ssA;
    tmpM1ssB = tmpP0ssB;
    tmpP0ssA = tmpP1ssA;
    tmpP0ssB = tmpP1ssB;
    tmpP1ssA = tmpP2ssA;
    tmpP1ssB = tmpP2ssB;
    tmpP2ssA = tmpP3ssA;
    tmpP2ssB = tmpP3ssB;

    const vector signed int pp1Ae = vec_mule(sum1A, v20ss);
    const vector signed int pp1Ao = vec_mulo(sum1A, v20ss);
    const vector signed int pp1Be = vec_mule(sum1B, v20ss);
    const vector signed int pp1Bo = vec_mulo(sum1B, v20ss);

    const vector signed int pp2Ae = vec_mule(sum2A, v5ss);
    const vector signed int pp2Ao = vec_mulo(sum2A, v5ss);
    const vector signed int pp2Be = vec_mule(sum2B, v5ss);
    const vector signed int pp2Bo = vec_mulo(sum2B, v5ss);

    const vector signed int pp3Ae = vec_sra((vector signed int)sum3A, v16ui);
    const vector signed int pp3Ao = vec_mulo(sum3A, v1ss);
    const vector signed int pp3Be = vec_sra((vector signed int)sum3B, v16ui);
    const vector signed int pp3Bo = vec_mulo(sum3B, v1ss);
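    /* pp3 above widens sum3 to 32 bits: shifting the reinterpreted vector right by 16
       extracts the sign-extended even (big-endian high) halves, and multiplying the
       odd halves by 1 with vec_mulo extracts those likewise. */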

    const vector signed int pp1cAe = vec_add(pp1Ae, v512si);
    const vector signed int pp1cAo = vec_add(pp1Ao, v512si);
    const vector signed int pp1cBe = vec_add(pp1Be, v512si);
    const vector signed int pp1cBo = vec_add(pp1Bo, v512si);

    const vector signed int pp32Ae = vec_sub(pp3Ae, pp2Ae);
    const vector signed int pp32Ao = vec_sub(pp3Ao, pp2Ao);
    const vector signed int pp32Be = vec_sub(pp3Be, pp2Be);
    const vector signed int pp32Bo = vec_sub(pp3Bo, pp2Bo);

    const vector signed int sumAe = vec_add(pp1cAe, pp32Ae);
    const vector signed int sumAo = vec_add(pp1cAo, pp32Ao);
    const vector signed int sumBe = vec_add(pp1cBe, pp32Be);
    const vector signed int sumBo = vec_add(pp1cBo, pp32Bo);
    
    const vector signed int ssumAe = vec_sra(sumAe, v10ui);
    const vector signed int ssumAo = vec_sra(sumAo, v10ui);
    const vector signed int ssumBe = vec_sra(sumBe, v10ui);
    const vector signed int ssumBo = vec_sra(sumBo, v10ui);

    const vector signed short ssume = vec_packs(ssumAe, ssumBe);
    const vector signed short ssumo = vec_packs(ssumAo, ssumBo);

    const vector unsigned char sumv = vec_packsu(ssume, ssumo);
    const vector unsigned char sum = vec_perm(sumv, sumv, mperm);

    const vector unsigned char dst1 = vec_ld(0, dst);
    const vector unsigned char dst2 = vec_ld(16, dst);
    const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));

    vector unsigned char fsum;
    OP_U8_ALTIVEC(fsum, sum, vdst);

    const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
    const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
    const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);

    vec_st(fdst1, 0, dst);
    vec_st(fdst2, 16, dst);

    dst += dstStride;
  }
  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
}
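/*
  Illustrative scalar sketch (not part of the original file) of what the hv
  lowpass above computes for one output pixel at (x, y), in the plain "put"
  case (OP_U8_ALTIVEC may also average with dst).  The helper name is
  hypothetical.
*/
static inline uint8_t h264_qpel_hv_lowpass_scalar(const uint8_t *src, int srcStride,
                                                  int x, int y)
{
    int tmp[6];
    int i, v;
    /* horizontal 6-tap (1,-5,20,20,-5,1) on the six rows the vertical tap needs */
    for (i = 0; i < 6; i++) {
        const uint8_t *s = src + (y + i - 2) * srcStride + x;
        tmp[i] = 20 * (s[0] + s[1]) - 5 * (s[-1] + s[2]) + (s[-2] + s[3]);
    }
    /* vertical 6-tap on the intermediates, round by 512, normalize by >>10, clip */
    v = (20 * (tmp[2] + tmp[3]) - 5 * (tmp[1] + tmp[4]) + (tmp[0] + tmp[5]) + 512) >> 10;
    return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v);
}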
/* this code assumes stride % 16 == 0 */
static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
  
  register int i;

  const vector signed int vzero = vec_splat_s32(0);
  const vector unsigned char perm = vec_lvsl(0, src);
  const vector signed short v20ss = (const vector signed short)AVV(20);
  const vector unsigned short v5us = vec_splat_u16(5);
  const vector signed short v5ss = vec_splat_s16(5);
  const vector signed short v16ss = (const vector signed short)AVV(16);
  const vector unsigned char dstperm = vec_lvsr(0, dst);
  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
  
  uint8_t *srcbis = src - (srcStride * 2);
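  /* the vertical 6-tap needs rows -2..+3 around each output row, so start two rows above src */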

  const vector unsigned char srcM2a = vec_ld(0, srcbis);
  const vector unsigned char srcM2b = vec_ld(16, srcbis);
  const vector unsigned char srcM2 = vec_perm(srcM2a, srcM2b, perm);
  srcbis += srcStride;
  const vector unsigned char srcM1a = vec_ld(0, srcbis);
  const vector unsigned char srcM1b = vec_ld(16, srcbis);
  const vector unsigned char srcM1 = vec_perm(srcM1a, srcM1b, perm);
  srcbis += srcStride;
  const vector unsigned char srcP0a = vec_ld(0, srcbis);
  const vector unsigned char srcP0b = vec_ld(16, srcbis);
  const vector unsigned char srcP0 = vec_perm(srcP0a, srcP0b, perm);
  srcbis += srcStride;
  const vector unsigned char srcP1a = vec_ld(0, srcbis);
  const vector unsigned char srcP1b = vec_ld(16, srcbis);
  const vector unsigned char srcP1 = vec_perm(srcP1a, srcP1b, perm);
  srcbis += srcStride;
  const vector unsigned char srcP2a = vec_ld(0, srcbis);
  const vector unsigned char srcP2b = vec_ld(16, srcbis);
  const vector unsigned char srcP2 = vec_perm(srcP2a, srcP2b, perm);
  srcbis += srcStride;

  vector signed short srcM2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
  vector signed short srcM2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);
  vector signed short srcM1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
  vector signed short srcM1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
  vector signed short srcP0ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
  vector signed short srcP0ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
  vector signed short srcP1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
  vector signed short srcP1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
  vector signed short srcP2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
  vector signed short srcP2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);

  for (i = 0 ; i < 16 ; i++) {
    const vector unsigned char srcP3a = vec_ld(0, srcbis);
    const vector unsigned char srcP3b = vec_ld(16, srcbis);
    const vector unsigned char srcP3 = vec_perm(srcP3a, srcP3b, perm);
    const vector signed short srcP3ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
    const vector signed short srcP3ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
    srcbis += srcStride;

    const vector signed short sum1A = vec_adds(srcP0ssA, srcP1ssA);
    const vector signed short sum1B = vec_adds(srcP0ssB, srcP1ssB);
    const vector signed short sum2A = vec_adds(srcM1ssA, srcP2ssA);
    const vector signed short sum2B = vec_adds(srcM1ssB, srcP2ssB);
    const vector signed short sum3A = vec_adds(srcM2ssA, srcP3ssA);
    const vector signed short sum3B = vec_adds(srcM2ssB, srcP3ssB);

    srcM2ssA = srcM1ssA;
    srcM2ssB = srcM1ssB;
    srcM1ssA = srcP0ssA;
    srcM1ssB = srcP0ssB;
    srcP0ssA = srcP1ssA;
    srcP0ssB = srcP1ssB;
    srcP1ssA = srcP2ssA;
    srcP1ssB = srcP2ssB;
    srcP2ssA = srcP3ssA;
    srcP2ssB = srcP3ssB;
    
    const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
    const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);

    const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
    const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
    
    const vector signed short pp3A = vec_add(sum3A, pp1A);
    const vector signed short pp3B = vec_add(sum3B, pp1B);

    const vector signed short psumA = vec_sub(pp3A, pp2A);
    const vector signed short psumB = vec_sub(pp3B, pp2B);

    const vector signed short sumA = vec_sra(psumA, v5us);
    const vector signed short sumB = vec_sra(psumB, v5us);

    const vector unsigned char sum = vec_packsu(sumA, sumB);

    const vector unsigned char dst1 = vec_ld(0, dst);
    const vector unsigned char dst2 = vec_ld(16, dst);
    const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));

    vector unsigned char fsum;
    OP_U8_ALTIVEC(fsum, sum, vdst);

    const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
    const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
    const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);

    vec_st(fdst1, 0, dst);
    vec_st(fdst2, 16, dst);

    dst += dstStride;
  }
  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
}
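/*
  For reference (not part of the original file): the vertical-only filter above
  computes, per output pixel,
      clip8((m2 - 5*m1 + 20*p0 + 20*p1 - 5*p2 + p3 + 16) >> 5)
  where m2..p3 are the six source pixels in the same column, and OP_U8_ALTIVEC
  then either stores the result or averages it with dst (put vs. avg template).
*/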
/* this code assumes that stride % 16 == 0 */
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
    signed int ABCD[4] __attribute__((aligned(16)));
    register int i;
    ABCD[0] = ((8 - x) * (8 - y));
    ABCD[1] = ((x) * (8 - y));
    ABCD[2] = ((8 - x) * (y));
    ABCD[3] = ((x) * (y));
    const vector signed int vABCD = vec_ld(0, ABCD);
    const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
    const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
    const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
    const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
    const vector signed int vzero = vec_splat_s32(0);
    const vector signed short v32ss = (const vector signed short)AVV(32);
    const vector unsigned short v6us = vec_splat_u16(6);
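    /* each output pixel is (A*s0 + B*s1 + C*s2 + D*s3 + 32) >> 6, where s0..s3 are the
       four neighbouring source pixels and A+B+C+D == 64 */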

    vector unsigned char fperm;

    if (((unsigned long)dst) % 16 == 0) {
      fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                                        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
    } else {
      fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
    }

    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
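    /* loadSecond: the 9 source bytes needed per row (8 pixels plus one for the bilinear
       tap) may straddle two 16-byte blocks, so a second load is required when src sits
       in the upper half of a block.  reallyBadAlign: at src % 16 == 15, vec_lvsl(1, src)
       wraps back to element 0 and would select from the wrong input, so vsrc1uc must be
       taken directly from the second block. */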
    
    vector unsigned char vsrcAuc;
    vector unsigned char vsrcBuc;
    vector unsigned char vsrcperm0;
    vector unsigned char vsrcperm1;
    vsrcAuc = vec_ld(0, src);
    if (loadSecond)
      vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
    
    vector unsigned char vsrc0uc;
    vector unsigned char vsrc1uc;
    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
      vsrc1uc = vsrcBuc;
    else
      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
    
    vector signed short vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc0uc);
    vector signed short vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc1uc);

    if (!loadSecond) { /* !loadSecond implies !reallyBadAlign */
      for (i = 0 ; i < h ; i++) {
        vector unsigned char vsrcCuc;
        vsrcCuc = vec_ld(stride + 0, src);
        
        vector unsigned char vsrc2uc;
        vector unsigned char vsrc3uc;
        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
        
        vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
        vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
        
        vector signed short psum;
        
        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sra(psum, v6us);
        
        vector unsigned char vdst = vec_ld(0, dst);
        vector unsigned char ppsum = (vector unsigned char)vec_packsu(psum, psum);
        
        vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
        vector unsigned char fsum;
        
        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);
        
        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;
        
        dst += stride;
        src += stride;
      }
    } else {
      for (i = 0 ; i < h ; i++) {
        vector unsigned char vsrcCuc;
        vector unsigned char vsrcDuc;
        vsrcCuc = vec_ld(stride + 0, src);
        vsrcDuc = vec_ld(stride + 16, src);
        
        vector unsigned char vsrc2uc;
        vector unsigned char vsrc3uc;
        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
        if (reallyBadAlign)
          vsrc3uc = vsrcDuc;
        else
          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
        
        vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
        vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
        
        vector signed short psum;
      
        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sr(psum, v6us);
        
        vector unsigned char vdst = vec_ld(0, dst);
        vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum); 
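        /* psum is non-negative here (all four weights and all samples are >= 0), so
           vec_sr and vec_pack behave exactly like the vec_sra/vec_packsu used in the
           aligned path above */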
        
        vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
        vector unsigned char fsum;
        
        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);
        
        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;
        
        dst += stride;
        src += stride;
      }
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
}
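/*
  Illustrative scalar equivalent (not part of the original file) of the bilinear
  chroma interpolation above, plain "put" case; the function name is hypothetical.
*/
static void h264_chroma_mc8_scalar(uint8_t *dst, uint8_t *src, int stride,
                                   int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++) {
            /* weights sum to 64, so the result is already in 0..255 */
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 32) >> 6;
        }
        dst += stride;
        src += stride;
    }
}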