Example #1
0
static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t *dst,
                                                 const uint8_t *src,
                                                 int dstStride, int srcStride)
{
    register int i;

    LOAD_ZERO;
    const vec_u8 perm = vec_lvsl(0, src);
    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_u16 v5us = vec_splat_u16(5);
    const vec_s16 v5ss = vec_splat_s16(5);
    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

    const uint8_t *srcbis = src - (srcStride * 2);

    const vec_u8 srcM2a = vec_ld(0, srcbis);
    const vec_u8 srcM2b = vec_ld(16, srcbis);
    const vec_u8 srcM2 = vec_perm(srcM2a, srcM2b, perm);
    //srcbis += srcStride;
    const vec_u8 srcM1a = vec_ld(0, srcbis += srcStride);
    const vec_u8 srcM1b = vec_ld(16, srcbis);
    const vec_u8 srcM1 = vec_perm(srcM1a, srcM1b, perm);
    //srcbis += srcStride;
    const vec_u8 srcP0a = vec_ld(0, srcbis += srcStride);
    const vec_u8 srcP0b = vec_ld(16, srcbis);
    const vec_u8 srcP0 = vec_perm(srcP0a, srcP0b, perm);
    //srcbis += srcStride;
    const vec_u8 srcP1a = vec_ld(0, srcbis += srcStride);
    const vec_u8 srcP1b = vec_ld(16, srcbis);
    const vec_u8 srcP1 = vec_perm(srcP1a, srcP1b, perm);
    //srcbis += srcStride;
    const vec_u8 srcP2a = vec_ld(0, srcbis += srcStride);
    const vec_u8 srcP2b = vec_ld(16, srcbis);
    const vec_u8 srcP2 = vec_perm(srcP2a, srcP2b, perm);
    //srcbis += srcStride;

    vec_s16 srcM2ssA = (vec_s16) vec_mergeh(zero_u8v, srcM2);
    vec_s16 srcM2ssB = (vec_s16) vec_mergel(zero_u8v, srcM2);
    vec_s16 srcM1ssA = (vec_s16) vec_mergeh(zero_u8v, srcM1);
    vec_s16 srcM1ssB = (vec_s16) vec_mergel(zero_u8v, srcM1);
    vec_s16 srcP0ssA = (vec_s16) vec_mergeh(zero_u8v, srcP0);
    vec_s16 srcP0ssB = (vec_s16) vec_mergel(zero_u8v, srcP0);
    vec_s16 srcP1ssA = (vec_s16) vec_mergeh(zero_u8v, srcP1);
    vec_s16 srcP1ssB = (vec_s16) vec_mergel(zero_u8v, srcP1);
    vec_s16 srcP2ssA = (vec_s16) vec_mergeh(zero_u8v, srcP2);
    vec_s16 srcP2ssB = (vec_s16) vec_mergel(zero_u8v, srcP2);

    vec_s16 pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
              psumA, psumB, sumA, sumB,
              srcP3ssA, srcP3ssB,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;

    vec_u8 sum, fsum, srcP3a, srcP3b, srcP3;

    for (i = 0 ; i < 16 ; i++) {
        srcP3a = vec_ld(0, srcbis += srcStride);
        srcP3b = vec_ld(16, srcbis);
        srcP3 = vec_perm(srcP3a, srcP3b, perm);
        srcP3ssA = (vec_s16) vec_mergeh(zero_u8v, srcP3);
        srcP3ssB = (vec_s16) vec_mergel(zero_u8v, srcP3);
        //srcbis += srcStride;

        sum1A = vec_adds(srcP0ssA, srcP1ssA);
        sum1B = vec_adds(srcP0ssB, srcP1ssB);
        sum2A = vec_adds(srcM1ssA, srcP2ssA);
        sum2B = vec_adds(srcM1ssB, srcP2ssB);
        sum3A = vec_adds(srcM2ssA, srcP3ssA);
        sum3B = vec_adds(srcM2ssB, srcP3ssB);

        srcM2ssA = srcM1ssA;
        srcM2ssB = srcM1ssB;
        srcM1ssA = srcP0ssA;
        srcM1ssB = srcP0ssB;
        srcP0ssA = srcP1ssA;
        srcP0ssB = srcP1ssB;
        srcP1ssA = srcP2ssA;
        srcP1ssB = srcP2ssB;
        srcP2ssA = srcP3ssA;
        srcP2ssB = srcP3ssB;

        pp1A = vec_mladd(sum1A, v20ss, v16ss);
        pp1B = vec_mladd(sum1B, v20ss, v16ss);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        pp3A = vec_add(sum3A, pp1A);
        pp3B = vec_add(sum3B, pp1B);

        psumA = vec_sub(pp3A, pp2A);
        psumB = vec_sub(pp3B, pp2B);

        sumA = vec_sra(psumA, v5us);
        sumB = vec_sra(psumB, v5us);

        sum = vec_packsu(sumA, sumB);

        ASSERT_ALIGNED(dst);

        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));

        vec_st(fsum, 0, dst);

        dst += dstStride;
    }
}
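
The loop above applies H.264's (1, -5, 20, 20, -5, 1) vertical half-pel filter to a full 16x16 block, two vectors of eight 16-bit samples per row. As a reading aid, here is a plain-C sketch of the same per-pixel arithmetic; the constants 20, 5 and the +16 >> 5 rounding come straight from v20ss, v5ss and v16ss, and the final OP_U8_ALTIVEC step (which differs between the put and avg variants) is left out.

#include <stdint.h>

/* Scalar sketch of the arithmetic the AltiVec loop above performs in
 * parallel: 20*(P0+P1) - 5*(M1+P2) + (M2+P3), rounded with +16, shifted
 * right by 5 and clamped to 0..255 (the vec_packsu step).  As in the
 * vector version, src must have two valid rows above and three below. */
static uint8_t clip_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

static void qpel16_v_lowpass_scalar(uint8_t *dst, const uint8_t *src,
                                    int dstStride, int srcStride)
{
    for (int y = 0; y < 16; y++) {
        for (int x = 0; x < 16; x++) {
            const uint8_t *s = src + y * srcStride + x;
            int sum = 20 * (s[0] + s[srcStride])
                    -  5 * (s[-srcStride] + s[2 * srcStride])
                    +      (s[-2 * srcStride] + s[3 * srcStride]);
            dst[y * dstStride + x] = clip_u8((sum + 16) >> 5);
        }
    }
}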
Example #2
0
static av_always_inline
void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int my, int w, int is6tap)
{
    LOAD_V_SUBPEL_FILTER(my-1);
    vec_u8 s0, s1, s2, s3, s4, s5, filt, align_vech, perm_vec, align_vecl;
    vec_s16 s0f, s1f, s2f, s3f, s4f, s5f, f16h, f16l;
    vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
    vec_u16 c7  = vec_splat_u16(7);

    // we want pixels 0-7 to be in the even positions and 8-15 in the odd,
    // so combine this permute with the alignment permute vector
    align_vech = vec_lvsl(0, src);
    align_vecl = vec_sld(align_vech, align_vech, 8);
    if (w == 16)
        perm_vec = vec_mergeh(align_vech, align_vecl);
    else
        perm_vec = vec_mergeh(align_vech, align_vech);

    if (is6tap)
        s0 = load_with_perm_vec(-2*src_stride, src, perm_vec);
    s1 = load_with_perm_vec(-1*src_stride, src, perm_vec);
    s2 = load_with_perm_vec( 0*src_stride, src, perm_vec);
    s3 = load_with_perm_vec( 1*src_stride, src, perm_vec);
    if (is6tap)
        s4 = load_with_perm_vec( 2*src_stride, src, perm_vec);

    src += (2+is6tap)*src_stride;

    while (h --> 0) {
        if (is6tap)
            s5 = load_with_perm_vec(0, src, perm_vec);
        else
            s4 = load_with_perm_vec(0, src, perm_vec);

        FILTER_V(f16h, vec_mule);

        if (w == 16) {
            FILTER_V(f16l, vec_mulo);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            if (w == 4)
                filt = (vec_u8)vec_splat((vec_u32)filt, 0);
            else
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
        }

        if (is6tap)
            s0 = s1;
        s1 = s2;
        s2 = s3;
        s3 = s4;
        if (is6tap)
            s4 = s5;

        dst += dst_stride;
        src += src_stride;
    }
}
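
The routine above keeps the two output halves in even and odd lanes so that vec_mule/vec_mulo evaluate both at once; the rounding it relies on is the usual one for VP8's 128-scaled subpel filters, visible in c64 = 64 and c7 = 7. Below is a minimal scalar sketch of that filter step, with the tap array taken as an assumed stand-in for LOAD_V_SUBPEL_FILTER.

#include <stdint.h>

/* Hedged scalar sketch of the rounding used above: VP8 subpel taps are
 * scaled by 128 (and may be negative), so the accumulated products are
 * rounded with +64 and shifted right by 7 (c64/c7 in the vector code),
 * then clamped to a byte.  'taps'/'ntaps' are illustrative only. */
static uint8_t vp8_filter_pixel(const uint8_t *src, int stride,
                                const int16_t *taps, int ntaps)
{
    int acc = 0;
    int first = -(ntaps / 2 - 1);          /* -2 for 6-tap, -1 for 4-tap */
    for (int i = 0; i < ntaps; i++)
        acc += taps[i] * src[(first + i) * stride];
    acc = (acc + 64) >> 7;                 /* round and rescale */
    return acc < 0 ? 0 : acc > 255 ? 255 : (uint8_t)acc;
}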
static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                    int stride, int h, int x, int y) {
  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
    LOAD_ZERO;
    const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16 v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;

  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);

    if (ABCD[3]) {
        if (!loadSecond) {// -> !reallyBadAlign
            for (i = 0 ; i < h ; i++) {
                vsrcCuc = vec_ld(stride + 0, src);
                vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
                vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

                CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
            }
        } else {
            vec_u8 vsrcDuc;
            for (i = 0 ; i < h ; i++) {
                vsrcCuc = vec_ld(stride + 0, src);
                vsrcDuc = vec_ld(stride + 16, src);
                vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
                if (reallyBadAlign)
                    vsrc3uc = vsrcDuc;
                else
                    vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

                CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
            }
        }
    } else {
Example #4
0
inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1)
{
    b0.val = vec_mergeh(a0.val, a1.val);
    b1.val = vec_mergel(a0.val, a1.val);
}
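
v_zip is a thin wrapper over the merge pair: vec_mergeh interleaves the first halves of the two inputs and vec_mergel the second halves. A scalar illustration for four-lane vectors (names are illustrative only):

/* a0 = {a,b,c,d}, a1 = {e,f,g,h}  ->  b0 = {a,e,b,f}, b1 = {c,g,d,h} */
static void zip4(const int a0[4], const int a1[4], int b0[4], int b1[4])
{
    for (int i = 0; i < 2; i++) {
        b0[2 * i]     = a0[i];         /* vec_mergeh: first halves */
        b0[2 * i + 1] = a1[i];
        b1[2 * i]     = a0[i + 2];     /* vec_mergel: second halves */
        b1[2 * i + 1] = a1[i + 2];
    }
}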
Example #5
0
/* this code assumes that stride % 16 == 0 */
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8_t fperm;
    const vec_s32_t vABCD = vec_ld(0, ABCD);
    const vec_s16_t vA = vec_splat((vec_s16_t)vABCD, 1);
    const vec_s16_t vB = vec_splat((vec_s16_t)vABCD, 3);
    const vec_s16_t vC = vec_splat((vec_s16_t)vABCD, 5);
    const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
    LOAD_ZERO;
    const vec_s16_t v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16_t v6us  = vec_splat_u16(6);
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8_t vsrc0uc, vsrc1uc;
    vec_s16_t vsrc0ssH, vsrc1ssH;
    vec_u8_t vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16_t vsrc2ssH, vsrc3ssH, psum;
    vec_u8_t vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8_t){0x10, 0x11, 0x12, 0x13,
                           0x14, 0x15, 0x16, 0x17,
                           0x08, 0x09, 0x0A, 0x0B,
                           0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8_t){0x00, 0x01, 0x02, 0x03,
                           0x04, 0x05, 0x06, 0x07,
                           0x18, 0x19, 0x1A, 0x1B,
                           0x1C, 0x1D, 0x1E, 0x1F};
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc0uc);
    vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
        for (i = 0 ; i < h ; i++) {


            vsrcCuc = vec_ld(stride + 0, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

            vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
            vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sra(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8_t)vec_packsu(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    } else {
        vec_u8_t vsrcDuc;
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride + 0, src);
            vsrcDuc = vec_ld(stride + 16, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
            if (reallyBadAlign)
                vsrc3uc = vsrcDuc;
            else
                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

            vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
            vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sr(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8_t)vec_pack(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    }
}
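
Both branches above evaluate the same 1/8-pel bilinear chroma interpolation; only the source alignment handling differs. A scalar sketch of the arithmetic, with the no-rounding bias of 28 (= 32 - 4, as built into v28ss) and the >> 6 taken from the vector constants:

#include <stdint.h>

/* Scalar sketch of the bilinear chroma interpolation vectorised above.
 * The weights A..D sum to 64, so the result already fits a byte and the
 * pack step never has to saturate.  Illustrative names only. */
static void chroma_mc8_no_rnd_scalar(uint8_t *dst, const uint8_t *src,
                                     int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y), B = x * (8 - y);
    const int C = (8 - x) * y,       D = x * y;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 28) >> 6;
        dst += stride;
        src += stride;
    }
}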
Example #6
0
/* next one assumes that ((line_size % 16) == 0) */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
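
The function above averages each pixel with its right, lower and lower-right neighbours using the no-rounding bias of 1 (vcone) rather than 2. A scalar sketch of that per-pixel average:

#include <stddef.h>
#include <stdint.h>

/* Scalar sketch of the xy2 half-pel average above: each output pixel is
 * (p[x] + p[x+1] + q[x] + q[x+1] + 1) >> 2, where q is the next source
 * row.  The vector code carries the row sums across iterations; here
 * they are simply recomputed. */
static void put_no_rnd_pixels16_xy2_scalar(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h)
{
    for (int i = 0; i < h; i++) {
        const uint8_t *p = pixels;
        const uint8_t *q = pixels + line_size;
        for (int x = 0; x < 16; x++)
            block[x] = (p[x] + p[x + 1] + q[x] + q[x + 1] + 1) >> 2;
        block  += line_size;
        pixels += line_size;
    }
}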
Example #7
0
/* start of optimized motionblur */
void pix_motionblur :: processYUVAltivec(imageStruct &image)
{
  int h,w,width;
  signed short rightGain,imageGain;
  unsigned char *saved = m_savedImage.data;

  m_savedImage.xsize=image.xsize;
  m_savedImage.ysize=image.ysize;
  m_savedImage.setCsizeByFormat(image.format);
  m_savedImage.reallocate();
  if(saved!=m_savedImage.data) {
    m_savedImage.setBlack();
  }
  saved=m_savedImage.data;

  width = image.xsize/8;
  /*
  // hmm: why does it read 235 ?
  rightGain = (signed short)(235. * m_motionblur);
  imageGain = (signed short) (255. - (235. * m_motionblur));
  */
  rightGain = m_blur1;
  imageGain = m_blur0;

  union {
    signed short        elements[8];
    vector      signed short v;
  } shortBuffer;

  union {
    unsigned int        elements[4];
    vector      unsigned int v;
  } bitBuffer;

  register vector signed short gainAdd, hiImage, loImage,hiRight,loRight,
           YImage, UVImage;
  // register vector signed short loadhiImage, loadloImage,loadhiRight,loadloRight;
  register vector unsigned char loadImage, loadRight;
  register vector unsigned char zero = vec_splat_u8(0);
  register vector signed int UVhi,UVlo,Yhi,Ylo;
  register vector signed int UVhiR,UVloR,YhiR,YloR;
  register vector signed short gainSub,gain,gainR;//,d;
  register vector unsigned int bitshift;
  vector unsigned char *inData = (vector unsigned char*) image.data;
  vector unsigned char *rightData = (vector unsigned char*) saved;


  shortBuffer.elements[0] = 128;
  shortBuffer.elements[1] = 0;
  shortBuffer.elements[2] = 128;
  shortBuffer.elements[3] = 0;
  shortBuffer.elements[4] = 128;
  shortBuffer.elements[5] = 0;
  shortBuffer.elements[6] = 128;
  shortBuffer.elements[7] = 0;

  gainSub = shortBuffer.v;

  shortBuffer.elements[0] = imageGain;
  gain = shortBuffer.v;
  gain =  vec_splat(gain, 0 );

  shortBuffer.elements[0] = rightGain;
  gainR = shortBuffer.v;
  gainR =  vec_splat(gainR, 0 );

  bitBuffer.elements[0] = 8;

  //Load it into the vector unit
  bitshift = bitBuffer.v;
  bitshift = vec_splat(bitshift,0);

  shortBuffer.elements[0] = 128;

  //Load it into the vector unit
  gainAdd = shortBuffer.v;
  gainAdd = (vector signed short)vec_splat((vector signed short)gainAdd,0);

# ifndef PPC970
  UInt32                        prefetchSize = GetPrefetchConstant( 16, 1,
      256 );
  vec_dst( inData, prefetchSize, 0 );
  vec_dst( rightData, prefetchSize, 1 );
  vec_dst( inData+32, prefetchSize, 2 );
  vec_dst( rightData+32, prefetchSize, 3 );
# endif

  loadImage = inData[0];
  loadRight = rightData[0];

  for ( h=0; h<image.ysize; h++) {
    for (w=0; w<width; w++) {
# ifndef PPC970
      vec_dst( inData, prefetchSize, 0 );
      vec_dst( rightData, prefetchSize, 1 );
      vec_dst( inData+32, prefetchSize, 2 );
      vec_dst( rightData+32, prefetchSize, 3 );
# endif
      //interleaved U Y V Y chars

      hiImage = (vector signed short) vec_mergeh( zero, loadImage );
      loImage = (vector signed short) vec_mergel( zero, loadImage );

      hiRight = (vector signed short) vec_mergeh( zero, loadRight );
      loRight = (vector signed short) vec_mergel( zero, loadRight );

      //hoist that load!!
      loadImage = inData[1];
      loadRight = rightData[1];

      //subtract 128 from UV

      hiImage = vec_subs(hiImage,gainSub);
      loImage = vec_subs(loImage,gainSub);

      hiRight = vec_subs(hiRight,gainSub);
      loRight = vec_subs(loRight,gainSub);

      //now vec_mule the UV into two vector ints
      //change sone to gain
      UVhi = vec_mule(gain,hiImage);
      UVlo = vec_mule(gain,loImage);

      UVhiR = vec_mule(gainR,hiRight);
      UVloR = vec_mule(gainR,loRight);

      //now vec_mulo the Y into two vector ints
      Yhi = vec_mulo(gain,hiImage);
      Ylo = vec_mulo(gain,loImage);

      YhiR = vec_mulo(gainR,hiRight);
      YloR = vec_mulo(gainR,loRight);


      //this is where to do the add and bitshift due to the resolution
      //add UV
      UVhi = vec_adds(UVhi,UVhiR);
      UVlo = vec_adds(UVlo,UVloR);

      Yhi = vec_adds(Yhi,YhiR);
      Ylo = vec_adds(Ylo,YloR);

      //bitshift UV
      UVhi = vec_sra(UVhi,bitshift);
      UVlo = vec_sra(UVlo,bitshift);

      Yhi = vec_sra(Yhi,bitshift);
      Ylo = vec_sra(Ylo,bitshift);

      //pack the UV into a single short vector
      UVImage =  vec_packs(UVhi,UVlo);

      //pack the Y into a single short vector
      YImage =  vec_packs(Yhi,Ylo);

      //vec_mergel + vec_mergeh Y and UV
      hiImage =  vec_mergeh(UVImage,YImage);
      loImage =  vec_mergel(UVImage,YImage);

      //add 128 offset back
      hiImage = vec_adds(hiImage,gainSub);
      loImage = vec_adds(loImage,gainSub);

      //pack back to unsigned chars and store into both the saved and the live buffer
      rightData[0] = (vector unsigned char)vec_packsu(hiImage, loImage);
      inData[0] = (vector unsigned char)vec_packsu(hiImage, loImage);

      inData++;
      rightData++;
    }
  }
# ifndef PPC970
  //stop the cache streams
  vec_dss( 0 );
  vec_dss( 1 );
  vec_dss( 2 );
  vec_dss( 3 );
# endif


}/* end of working altivec function */
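
Per byte, the blur above is a fixed-point crossfade between the incoming frame and the saved frame: chroma is re-centred around 128 (gainSub) before the weighted sum, and the result is rescaled by the >> 8 held in bitshift. A hedged scalar sketch of that blend, with illustrative names:

#include <stdint.h>

/* Scalar sketch of the per-byte blend vectorised above, for UYVY data:
 * U/V are offset by 128 before the weighted sum and the offset is added
 * back afterwards; Y is blended directly.  imageGain/rightGain mirror
 * m_blur0/m_blur1, interpreted as 8.8 fixed-point weights. */
static uint8_t blend_byte(uint8_t cur, uint8_t saved,
                          int imageGain, int rightGain, int is_chroma)
{
    int bias = is_chroma ? 128 : 0;
    int v = ((cur - bias) * imageGain + (saved - bias) * rightGain) >> 8;
    v += bias;
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}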
Example #8
0
void fdct_altivec(int16_t *block)
{
POWERPC_PERF_DECLARE(altivec_fdct, 1);
    vector signed short *bp;
    vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;

    POWERPC_PERF_START_COUNT(altivec_fdct, 1);


    /* setup constants {{{ */
    /* mzero = -0.0 */
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */


    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))

    bp = (vector signed short*)block;
    b00 = ((vector float)vec_ld(0,    bp));
    b40 = ((vector float)vec_ld(16*4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0,    bp));
    b50 = ((vector float)vec_ld(16*4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0,    bp));
    b60 = ((vector float)vec_ld(16*4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0,    bp));
    b70 = ((vector float)vec_ld(16*4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));

#undef MERGE_S16
    /* }}} */


/* Some of the initial calculations can be done as vector short before
 * conversion to vector float.  The following code section takes advantage
 * of this.
 */
#if 1
    /* fdct rows {{{ */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));

#define CTF0(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);

#define CTFX(x,b) \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0); \

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX


    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);


    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */
#else
    /* convert to float {{{ */
#define CTF(n) \
    vs32(b##n##1) = vec_unpackl(vs16(b##n##0)); \
    vs32(b##n##0) = vec_unpackh(vs16(b##n##0)); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0); \

    CTF(0);
    CTF(1);
    CTF(2);
    CTF(3);
    CTF(4);
    CTF(5);
    CTF(6);
    CTF(7);

#undef CTF
    /* }}} */

    FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71);
#endif


    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */


    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);


    /* round, convert back to short {{{ */
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp);

    bp = (vector signed short*)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */

POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
}
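
The two transpose blocks above are built entirely from vec_mergeh/vec_mergel; the same butterfly written out in scalar C on a 4x4 tile shows why two rounds of interleaving leave the columns in the output rows (the 8x8 case is the same idea applied to half-rows).

/* merge4 mimics one mergeh/mergel pair on 4-lane float vectors. */
static void merge4(const float *a, const float *b, float *hi, float *lo)
{
    hi[0] = a[0]; hi[1] = b[0]; hi[2] = a[1]; hi[3] = b[1];  /* vec_mergeh */
    lo[0] = a[2]; lo[1] = b[2]; lo[2] = a[3]; lo[3] = b[3];  /* vec_mergel */
}

static void transpose4x4_by_merges(float m[4][4])
{
    float x0[4], x1[4], x2[4], x3[4];
    merge4(m[0], m[2], x1, x0);       /* round 1: interleave rows 0 and 2 */
    merge4(m[1], m[3], x3, x2);       /* round 1: interleave rows 1 and 3 */
    merge4(x1, x3, m[0], m[1]);       /* round 2: columns 0 and 1 fall out */
    merge4(x0, x2, m[2], m[3]);       /* round 2: columns 2 and 3 fall out */
}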
Example #9
0
void pix_movement :: processYUVAltivec(imageStruct &image)
{
  if (image.xsize*image.ysize != buffer.xsize*buffer.ysize) {
    buffer.xsize = image.xsize;
    buffer.ysize = image.ysize;
    buffer.reallocate(buffer.xsize*buffer.ysize*2);
  }
  int pixsize = image.ysize * image.xsize/8;

  union {
    signed short  c[8];
    vector signed short  v;
  } shortBuffer;

  union {
    unsigned short  c[8];
    vector unsigned short  v;
  } ushortBuffer;

  int i;

  vector signed short thresh;
  shortBuffer.c[0] = threshold;
  thresh = shortBuffer.v;
  thresh = (vector signed short)vec_splat(thresh,0);

  vector unsigned char *rp = (vector unsigned char *)
                             image.data; // read pointer
  vector unsigned char *wp = (vector unsigned char *)
                             buffer.data; // write pointer to the copy
  vector unsigned char grey0,grey1;
  vector unsigned char one = vec_splat_u8(1);
  vector unsigned short Y0,Ywp0,hiImage0,loImage0;
  vector unsigned short Y1,Ywp1,hiImage1,loImage1;
  vector unsigned short UVwp0,UVwp1;
  vector signed short temp0,temp1;

  ushortBuffer.c[0]=127;
  vector unsigned short UV0= (vector unsigned short)vec_splat(ushortBuffer.v,
                             0);
  vector unsigned short UV1= (vector unsigned short)vec_splat(ushortBuffer.v,
                             0);

#ifndef PPC970
  //setup the cache prefetch -- A MUST!!!
  UInt32 prefetchSize = GetPrefetchConstant( 16, 0, 256 );
  vec_dst( rp, prefetchSize, 0 );
  vec_dst( wp, prefetchSize, 1 );
#endif

  int j = 16;

  pixsize/=2;
  for (i=0; i < pixsize; i++) {
# ifndef PPC970
    //setup the cache prefetch -- A MUST!!!
    UInt32 prefetchSize = GetPrefetchConstant( j, 0, j * 16 );
    vec_dst( rp, prefetchSize, 0 );
    vec_dst( wp, prefetchSize, 1 );
    vec_dst( rp+16, prefetchSize, 2 );
    vec_dst( wp+16, prefetchSize, 3 );
# endif

    grey0 = rp[0];
    grey1 = rp[1];

//      rp[Y0]=255*(abs(grey0-*wp)>thresh);

//      UV0= (vector unsigned short)vec_mule(grey0,one);
    Y0 = (vector unsigned short)vec_mulo(grey0,one);

//      UV1= (vector unsigned short)vec_mule(grey1,one);
    Y1 = (vector unsigned short)vec_mulo(grey1,one);

    //wp is actually 1/2 the size of the image because it is only Y??

    //here the full U Y V Y is stored
//      UVwp0= (vector unsigned short)vec_mule(wp[0],one);
    Ywp0 = (vector unsigned short)vec_mulo(wp[0],one);

//      UVwp1= (vector unsigned short)vec_mule(wp[1],one);
    Ywp1 = (vector unsigned short)vec_mulo(wp[1],one);

    //store the current pixels as the history for next time
    wp[0]=grey0;
    wp++;
    wp[0]=grey1;
    wp++;

    temp0 = vec_abs(vec_sub((vector signed short)Y0,
                            (vector signed short)Ywp0));
    Y0 = (vector unsigned short)vec_cmpgt(temp0,thresh);

    temp1 = vec_abs(vec_sub((vector signed short)Y1,
                            (vector signed short)Ywp1));
    Y1 = (vector unsigned short)vec_cmpgt(temp1,thresh);

    hiImage0 = vec_mergeh(UV0,Y0);
    loImage0 = vec_mergel(UV0,Y0);

    hiImage1 = vec_mergeh(UV1,Y1);
    loImage1 = vec_mergel(UV1,Y1);

    grey0 = vec_packsu(hiImage0,loImage0);
    grey1 = vec_packsu(hiImage1,loImage1);

    rp[0]=grey0;
    rp++;
    rp[0]=grey1;
    rp++;
    // grey = rp[0];
    // rp[Y1]=255*(abs(grey-*wp)>thresh);
    // *wp++=grey;

    // rp+=4;
    // rp++;
  }

# ifndef PPC970
  vec_dss(0);
  vec_dss(1);
  vec_dss(2);
  vec_dss(3);
# endif
}
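
The detector above reduces each frame to a per-pixel decision: output luma is 255 wherever |Y - Y_prev| exceeds the threshold (vec_cmpgt packs to all-ones or zero), chroma is forced to a neutral 127, and the current frame is kept as history for the next call. A scalar sketch of one output sample, with illustrative names:

#include <stdint.h>
#include <stdlib.h>

/* Scalar sketch of the per-pixel test vectorised above.  The vector code
 * stores the whole UYVY chunk as history but only its Y bytes take part
 * in the comparison; here saved_y stands for that Y history byte. */
static void movement_pixel(uint8_t *out_u, uint8_t *out_y,
                           uint8_t in_u, uint8_t in_y,
                           uint8_t *saved_y, int threshold)
{
    (void)in_u;                               /* input chroma is discarded */
    int moved = abs((int)in_y - (int)*saved_y) > threshold;
    *saved_y = in_y;                          /* update history */
    *out_y = moved ? 255 : 0;
    *out_u = 127;                             /* neutral chroma */
}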
Example #10
0
void subsample_image_altivec(SUBSAMPLE_IMAGE_PDECL)
{
    int i, ii, j, stride1, stride2, stride3, stride4, halfstride;
    unsigned char *pB, *pB2, *pB4;
    vector unsigned char l0, l1, l2, l3;
    vector unsigned short s0, s1, s2, s3;
    vector unsigned short s22_0, s22_1, s22_2, s22_3;
    vector unsigned short s44, s44_0, s44_1;
    vector unsigned short zero, two;
#ifdef ALTIVEC_DST
    DataStreamControl dsc;
#endif

#ifdef ALTIVEC_VERIFY
    if (NOT_VECTOR_ALIGNED(image))
	mjpeg_error_exit1("subsample_image: %s %% %d != 0, (%d)",
	    "image", 16, image);
    if (NOT_VECTOR_ALIGNED(sub22_image))
	mjpeg_error_exit1("subsample_image: %s %% %d != 0, (%d)",
	    "sub22_image", 16, sub22_image);
    if (NOT_VECTOR_ALIGNED(sub44_image))
	mjpeg_error_exit1("subsample_image: %s %% %d != 0, (%d)",
	    "sub44_image", 16, sub44_image);

    if ((rowstride & 63) != 0)
	mjpeg_error_exit1("subsample_image: %s %% %d != 0, (%d)",
	    "rowstride", 64, rowstride);
#endif

    AMBER_START;

    pB = image;

#ifdef ALTIVEC_DST
    dsc.control = DATA_STREAM_CONTROL(6,4,0);
    dsc.block.stride = rowstride;

    vec_dst(pB, dsc.control, 0);
#endif

    pB2 = sub22_image;
    pB4 = sub44_image;

    j = ((unsigned long)(pB2 - pB) / rowstride) >> 2; /* height/4 */

    stride1 = rowstride;
    stride2 = stride1 + stride1;
    stride3 = stride2 + stride1;
    stride4 = stride2 + stride2;
    halfstride = stride1 >> 1; /* /2 */

    ii = rowstride >> 6; /* rowstride/16/4 */

    zero = vec_splat_u16(0);
    two = vec_splat_u16(2);

    do {
	i = ii;
	do {
	    l0 = vec_ld(0, pB);
	    l1 = vec_ld(stride1, pB);
	    l2 = vec_ld(stride2, pB);
	    l3 = vec_ld(stride3, pB);
	    pB += 16;
#ifdef ALTIVEC_DST
	    vec_dst(pB + (16 * 3), dsc.control, 0);
#endif

	    /* l0 = 0x[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F] */
	    /* l1 = 0x[10,11,12,13,14,15,16,17,18,19,1A,1B,1C,1D,1E,1F] */
	    /* l2 = 0x[20,21,22,23,24,25,26,27,28,29,2A,2B,2C,2D,2E,2F] */
	    /* l3 = 0x[30,31,32,33,34,35,36,37,38,39,3A,3B,3C,3D,3E,3F] */

	    /* s0 = 0x[00,01,      02,03,      04,05,      06,07,     ] */
	    /*        [      10,11,      12,13,      14,15,      16,17] */
	    s0 = vu16(vec_mergeh(vu16(l0), vu16(l1)));
	    /* s0 = 0x[00+01+10+11,02+03+12+13,04+05+14+15,06+07+16+17] */
	    s0 = vu16(vec_sum4s(vu8(s0), vu32(zero)));

	    /* s1 = 0x[08,09,      0A,0B,      0C,0D,      0E,0F,     ] */
	    /*        [      18,19,      1A,1B,      1C,1D,      1E,1F] */
	    s1 = vu16(vec_mergel(vu16(l0), vu16(l1)));
	    /* s1 = 0x[08+09+18+19,0A+0B+1A+1B,0C+0D+1C+1D,0E+0F+1E+1F] */
	    s1 = vu16(vec_sum4s(vu8(s1), vu32(zero)));

	    /* s2 = 0x[20,21,      22,23,      24,25,      26,27,     ] */
	    /*        [      30,31,      32,33,      34,35,      36,37] */
	    s2 = vu16(vec_mergeh(vu16(l2), vu16(l3)));
	    /* s2 = 0x[20+21+30+31,22+23+32+33,24+25+34+35,26+27+36+37] */
	    s2 = vu16(vec_sum4s(vu8(s2), vu32(zero)));

	    /* s3 = 0x[28,29,      2A,2B,      2C,2D,      2E,2F,     ] */
	    /*        [      38,39,      3A,3B,      3C,3D,      3E,3F] */
	    s3 = vu16(vec_mergel(vu16(l2), vu16(l3)));
	    /* s3 = 0x[28+29+38+39,2A+2B+3A+3B,2C+2D+3C+3D,2E+2F+3E+3F] */
	    s3 = vu16(vec_sum4s(vu8(s3), vu32(zero)));

	    /* start loading next block */
	    l0 = vec_ld(0, pB);
	    l1 = vec_ld(stride1, pB);
	    l2 = vec_ld(stride2, pB);
	    l3 = vec_ld(stride3, pB);
	    pB += 16;

	    /* s0 = 0x[00+01+10+11, 02+03+12+13, 04+05+14+15, 06+07+16+17] */
	    /* s1 = 0x[08+09+18+19, 0A+0B+1A+1B, 0C+0D+1C+1D, 0E+0F+1E+1F] */
	    /* s2 = 0x[20+21+30+31, 22+23+32+33, 24+25+34+35, 26+27+36+37] */
	    /* s3 = 0x[28+29+38+39, 2A+2B+3A+3B, 2C+2D+3C+3D, 2E+2F+3E+3F] */

	    /* s22_0 = 0x[   00,   02,   04,   06,   08,   0A,   0C,   0E] */
	    s22_0 = vec_packsu(vu32(s0), vu32(s1));
	    /* s22_1 = 0x[   20,   22,   24,   26,   28,   2A,   2C,   2E] */
	    s22_1 = vec_packsu(vu32(s2), vu32(s3));

	    /* (pB[i]+pB[i+1]+pN[i]+pN[i+1]) + 2 */
	    s22_0 = vec_add(s22_0, two);
	    /* (pNN[i]+pNN[i+1]+pNNN[i]+pNNN[i+1]) + 2 */
	    s22_1 = vec_add(s22_1, two);

	    /* (pB[i]+pB[i+1]+pN[i]+pN[i+1]+2) >> 2 */
	    s22_0 = vec_sra(s22_0, two);
	    /* (pNN[i]+pNN[i+1]+pNNN[i]+pNNN[i+1]+2) >> 2 */
	    s22_1 = vec_sra(s22_1, two);

	    /* s22_0 = 0x[   00,   02,   04,   06,   08,   0A,   0C,   0E] */
	    /* s22_1 = 0x[   20,   22,   24,   26,   28,   2A,   2C,   2E] */
	    /* s44_0 = 0x[00+20,02+22,04+24,06+26,08+28,0A+2A,0C+2C,0E+2E] */
	    s44_0 = vec_add(s22_0, s22_1);

	    /* s44_0 = 0x[00+20+02+22, 04+24+06+26, 08+28+0A+2A, 0C+2C+0E+2E] */
	    s44_0 = vu16(vec_sum4s(vs16(s44_0), vs32(zero)));

	    /* - - - - - - - - - - - - - - - - - - - */
	    s0 = vu16(vec_mergeh(vu16(l0), vu16(l1)));
	    s0 = vu16(vec_sum4s(vu8(s0), vu32(zero)));
	    s1 = vu16(vec_mergel(vu16(l0), vu16(l1)));
	    s1 = vu16(vec_sum4s(vu8(s1), vu32(zero)));
	    s2 = vu16(vec_mergeh(vu16(l2), vu16(l3)));
	    s2 = vu16(vec_sum4s(vu8(s2), vu32(zero)));
	    s3 = vu16(vec_mergel(vu16(l2), vu16(l3)));
	    s3 = vu16(vec_sum4s(vu8(s3), vu32(zero)));

	    /* start loading next l[0-3] */
	    l0 = vec_ld(0, pB);
	    l1 = vec_ld(stride1, pB);
	    l2 = vec_ld(stride2, pB);
	    l3 = vec_ld(stride3, pB);
	    pB += 16;


	    s22_2 = vec_packsu(vu32(s0), vu32(s1));
	    s22_3 = vec_packsu(vu32(s2), vu32(s3));

	    s22_2 = vec_add(s22_2, two);
	    s22_3 = vec_add(s22_3, two);

	    s22_2 = vec_sra(s22_2, two);
	    s22_3 = vec_sra(s22_3, two);


	    s44_1 = vec_add(s22_2, s22_3);
	    s44_1 = vu16(vec_sum4s(vs16(s44_1), vs32(zero)));

	    /* store s22 block */
	    s22_0 = vu16(vec_packsu(s22_0, s22_2));
	    s22_1 = vu16(vec_packsu(s22_1, s22_3));
	    vec_st(vu8(s22_0), 0, pB2);
	    vec_st(vu8(s22_1), halfstride, pB2);
	    pB2 += 16;

	    /* - - - - - - - - - - - - - - - - - - - */
	    s0 = vu16(vec_mergeh(vu16(l0), vu16(l1)));
	    s0 = vu16(vec_sum4s(vu8(s0), vu32(zero)));
	    s1 = vu16(vec_mergel(vu16(l0), vu16(l1)));
	    s1 = vu16(vec_sum4s(vu8(s1), vu32(zero)));
	    s2 = vu16(vec_mergeh(vu16(l2), vu16(l3)));
	    s2 = vu16(vec_sum4s(vu8(s2), vu32(zero)));
	    s3 = vu16(vec_mergel(vu16(l2), vu16(l3)));
	    s3 = vu16(vec_sum4s(vu8(s3), vu32(zero)));

	    /* starting loading next l[0-3] */
	    l0 = vec_ld(0, pB);
	    l1 = vec_ld(stride1, pB);
	    l2 = vec_ld(stride2, pB);
	    l3 = vec_ld(stride3, pB);
	    pB += 16;


	    s22_0 = vec_packsu(vu32(s0), vu32(s1));
	    s22_1 = vec_packsu(vu32(s2), vu32(s3));

	    s22_0 = vec_add(s22_0, two);
	    s22_1 = vec_add(s22_1, two);

	    s22_0 = vec_sra(s22_0, two);
	    s22_1 = vec_sra(s22_1, two);


	    s44 = vec_packsu(vu32(s44_0), vu32(s44_1));
	    s44 = vec_add(s44, two);
	    s44 = vec_sra(s44, two);

	    s44_0 = vec_add(s22_0, s22_1);
	    s44_0 = vu16(vec_sum4s(vs16(s44_0), vs32(zero)));

	    /* - - - - - - - - - - - - - - - - - - - */
	    s0 = vu16(vec_mergeh(vu16(l0), vu16(l1)));
	    s0 = vu16(vec_sum4s(vu8(s0), vu32(zero)));
	    s1 = vu16(vec_mergel(vu16(l0), vu16(l1)));
	    s1 = vu16(vec_sum4s(vu8(s1), vu32(zero)));
	    s2 = vu16(vec_mergeh(vu16(l2), vu16(l3)));
	    s2 = vu16(vec_sum4s(vu8(s2), vu32(zero)));
	    s3 = vu16(vec_mergel(vu16(l2), vu16(l3)));
	    s3 = vu16(vec_sum4s(vu8(s3), vu32(zero)));

	    s22_2 = vec_packsu(vu32(s0), vu32(s1));
	    s22_3 = vec_packsu(vu32(s2), vu32(s3));

	    s22_2 = vec_add(s22_2, two);
	    s22_3 = vec_add(s22_3, two);

	    s22_2 = vec_sra(s22_2, two);
	    s22_3 = vec_sra(s22_3, two);

	    s44_1 = vec_add(s22_2, s22_3);
	    s44_1 = vu16(vec_sum4s(vs16(s44_1), vs32(zero)));

	    /* store s22 block */
	    s22_0 = vu16(vec_packsu(s22_0, s22_2));
	    s22_1 = vu16(vec_packsu(s22_1, s22_3));
	    vec_st(vu8(s22_0), 0, pB2);
	    vec_st(vu8(s22_1), halfstride, pB2);
	    pB2 += 16;

	    /* pack all four s44 chunks */
	    s44_0 = vec_packsu(vu32(s44_0), vu32(s44_1));
	    s44_0 = vec_add(s44_0, two);
	    s44_0 = vec_sra(s44_0, two);
	    s44 = vu16(vec_packsu(s44, s44_0));

	    vec_st(vu8(s44), 0, pB4);
	    pB4 += 16;

	} while (--i);

	pB += stride3;
	pB2 += halfstride;

    } while (--j);

#ifdef ALTIVEC_DST
    vec_dss(0);
#endif

    AMBER_STOP;
}
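
Each pass above produces the half-resolution image from rounded 2x2 box averages and the quarter-resolution image from a second 2x2 average over that result, both with the +2 >> 2 rounding visible in the vector constants. A rough scalar sketch, with parameter names assumed in place of SUBSAMPLE_IMAGE_PDECL:

#include <stdint.h>

/* Scalar sketch of the two reductions vectorised above: sub22 is the
 * rounded average of each 2x2 block of the source, and sub44 is the
 * rounded average of each 2x2 block of sub22.  As in the vector code,
 * rowstride is treated as the working width. */
static void subsample_image_scalar(const uint8_t *image,
                                   uint8_t *sub22, uint8_t *sub44,
                                   int rowstride, int height)
{
    int half = rowstride >> 1, quarter = rowstride >> 2;

    for (int y = 0; y < height / 2; y++)
        for (int x = 0; x < rowstride / 2; x++) {
            const uint8_t *p = image + 2 * y * rowstride + 2 * x;
            sub22[y * half + x] =
                (p[0] + p[1] + p[rowstride] + p[rowstride + 1] + 2) >> 2;
        }

    for (int y = 0; y < height / 4; y++)
        for (int x = 0; x < rowstride / 4; x++) {
            const uint8_t *p = sub22 + 2 * y * half + 2 * x;
            sub44[y * quarter + x] =
                (p[0] + p[1] + p[half] + p[half + 1] + 2) >> 2;
        }
}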
Example #11
0
int bsumsq_altivec(BSUMSQ_PDECL)
{
    int i;
    uint8_t *pfy, *pby;
    vector unsigned char l0, l1, lR;
    vector unsigned char permF0, permF1, permB0, permB1;
    vector unsigned char vf, vfa, vfb, vfc;
    vector unsigned char vb, vba, vbb, vbc;
    vector unsigned short tH, tL, fH, fL, bH, bL;
    vector unsigned char zero;
    vector unsigned short one, two;
    vector unsigned char max, min, dif;
    vector unsigned int sum;
    union {
	vector signed int v;
	struct {
	    signed int pad[3];
	    signed int sum;
	} s;
    } vo;


 
#ifdef ALTIVEC_VERIFY
    if (hxf != 0 && hxf != 1)
	mjpeg_error_exit1("bsumsq: hxf != [0|1], (hxf=%d)", hxf);

    if (hyf != 0 && hyf != 1)
	mjpeg_error_exit1("bsumsq: hyf != [0|1], (hyf=%d)", hyf);

    if (hxb != 0 && hxb != 1)
	mjpeg_error_exit1("bsumsq: hxb != [0|1], (hxb=%d)", hxb);

    if (hyb != 0 && hyb != 1)
	mjpeg_error_exit1("bsumsq: hyb != [0|1], (hyb=%d)", hyb);

    if (NOT_VECTOR_ALIGNED(p2))
	mjpeg_error_exit1("bsumsq: p2 %% 16 != 0, (0x%X)", p2);

    if (NOT_VECTOR_ALIGNED(rowstride))
	mjpeg_error_exit1("bsumsq: rowstride %% 16 != 0, (%d)", rowstride);

    if (h != 8 && h != 16)
	mjpeg_error_exit1("bsumsq: h != [8|16], (%d)", h);
#endif

    AMBER_START;


    /* start loading first set  */
    vfb = vec_ld(0, pf);	 /* use vfb & vfc as temp for vf & vfa */
    vfc = vec_ld(16, pf);

    pfy = pf + (rowstride * hyf);
    l0 = vec_ld(0, pfy);
    l1 = vec_ld(16, pfy);


    pby = pb + (rowstride * hyb);


    zero  = vec_splat_u8(0);
    one = vec_splat_u16(1);
    two = vec_splat_u16(2);

    sum = vec_splat_u32(0);


    permF0 = vec_lvsl(0, pf);
    permF1 = vec_lvsl(hxf, (unsigned char*)0);
    permF1 = vec_splat(permF1, 0);
    permF1 = vec_add(permF0, permF1);
    
    permB0 = vec_lvsl(0, pb);
    permB1 = vec_lvsl(hxb, (unsigned char*)0);
    permB1 = vec_splat(permB1, 0);
    permB1 = vec_add(permB0, permB1);

    
    i = h - 1;
    do { /* while (--i) */

	vf = vec_perm(vfb, vfc, permF0);
	vfa = vec_perm(vfb, vfc, permF1);
	vfb = vec_perm(l0, l1, permF0);
	vfc = vec_perm(l0, l1, permF1);

	vbb = vec_ld(0, pb);	 /* use vbb & vbc as temp for vb & vba */
	vbc = vec_ld(16, pb);
	l0 = vec_ld(0, pby);
	l1 = vec_ld(16, pby);

	pb += rowstride;
	pby += rowstride;

	/* (unsigned short[]) pf[0-7] */    
	fH = vu16(vec_mergeh(zero, vf));
			
	/* (unsigned short[]) pf[8-15] */   
	fL = vu16(vec_mergel(zero, vf));
			
	/* (unsigned short[]) pfa[0-7] */    
	tH = vu16(vec_mergeh(zero, vfa));
			
	/* (unsigned short[]) pfa[8-15] */   
	tL = vu16(vec_mergel(zero, vfa));

	/* pf[i] + pfa[i] */                                                 
	fH = vec_add(fH, tH);                                               
	fL = vec_add(fL, tL);                                               

	/* (unsigned short[]) pfb[0-7] */  
	tH = vu16(vec_mergeh(zero, vfb));
			
	/* (unsigned short[]) pfb[8-15] */ 
	tL = vu16(vec_mergel(zero, vfb));

	/* (pf[i]+pfa[i]) + pfb[i] */                                       
	fH = vec_add(fH, tH);                                                
	fL = vec_add(fL, tL);                                                
			
	/* (unsigned short[]) pfc[0-7] */  
	tH = vu16(vec_mergeh(zero, vfc));
			
	/* (unsigned short[]) pfc[8-15] */ 
	tL = vu16(vec_mergel(zero, vfc));

	/* (pf[i]+pfa[i]+pfb[i]) + pfc[i] */
	fH = vec_add(fH, tH);                                                
	fL = vec_add(fL, tL);                                                

							
	/* (pf[i]+pfa[i]+pfb[i]+pfc[i]) + 2 */
	fH = vec_add(fH, two);                                                
	fL = vec_add(fL, two);                                                
							
	/* (pf[i]+pfa[i]+pfb[i]+pfc[i]+2) >> 2 */
	fH = vec_sra(fH, two);                                                
	fL = vec_sra(fL, two);                                                


	lR = vec_ld(0, p2);
	p2 += rowstride;

	vb = vec_perm(vbb, vbc, permB0);
	vba = vec_perm(vbb, vbc, permB1);
	vbb = vec_perm(l0, l1, permB0);
	vbc = vec_perm(l0, l1, permB1);


	pf += rowstride;
	vfb = vec_ld(0, pf);	 /* use vfb & vfc as temp for vf & vfa */
	vfc = vec_ld(16, pf);
	pfy += rowstride;
	l0 = vec_ld(0, pfy);
	l1 = vec_ld(16, pfy);

	/* (unsigned short[]) pb[0-7] */    
	bH = vu16(vec_mergeh(zero, vb));

	/* (unsigned short[]) pb[8-15] */   
	bL = vu16(vec_mergel(zero, vb));

	/* (unsigned short[]) pba[0-7] */    
	tH = vu16(vec_mergeh(zero, vba));

	/* (unsigned short[]) pba[8-15] */   
	tL = vu16(vec_mergel(zero, vba));

	/* pb[i] + pba[i] */                                                 
	bH = vec_add(bH, tH);                                               
	bL = vec_add(bL, tL);                                               

	/* (unsigned short[]) pbb[0-7] */  
	tH = vu16(vec_mergeh(zero, vbb));

	/* (unsigned short[]) pbb[8-15] */ 
	tL = vu16(vec_mergel(zero, vbb));

	/* (pb[i]+pba[i]) + pbb[i] */                                       
	bH = vec_add(bH, tH);                                                
	bL = vec_add(bL, tL);                                                
			
	/* (unsigned short[]) pbc[0-7] */  
	tH = vu16(vec_mergeh(zero, vbc));

	/* (unsigned short[]) pbc[8-15] */ 
	tL = vu16(vec_mergel(zero, vbc));

	/* (pb[i]+pba[i]+pbb[i]) + pbc[i] */
	bH = vec_add(bH, tH);                                                
	bL = vec_add(bL, tL);                                                

							
	/* (pb[i]+pba[i]+pbb[i]+pbc[i]) + 2 */
	bH = vec_add(bH, two);                                                
	bL = vec_add(bL, two);                                                

	/* (pb[i]+pba[i]+pbb[i]+pbc[i]+2) >> 2 */
	bH = vec_sra(bH, two);                                                
	bL = vec_sra(bL, two);                                                

	/* ((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2) +
	 * ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)
         */
	tH = vec_add(fH, bH);                                                
	tL = vec_add(fL, bL);                                                

	/* (((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
	 *  ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)) + 1
         */
	tH = vec_add(tH, one);                                                
	tL = vec_add(tL, one);                                                

	/* (((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
	 *  ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)+1) >> 1
         */
	tH = vec_sra(tH, one);                                                
	tL = vec_sra(tL, one);                                                

	/* absolute value increases parallelism (x16 instead of x8)
	 * since a bit isn't lost on the sign.
	 * 
	 * d = abs( ((((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
	 *            ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)+1)>>1) - p2[i] )
         */
	tH = vu16(vec_packsu(tH, tL));
	min = vec_min(vu8(tH), lR);                                           
	max = vec_max(vu8(tH), lR);                                           
	dif = vec_sub(max, min);                                              

	/* sum += (d * d) */                                                   
	sum = vec_msum(dif, dif, sum);                                        

    } while (--i);

    vf = vec_perm(vfb, vfc, permF0);
    vfa = vec_perm(vfb, vfc, permF1);
    vfb = vec_perm(l0, l1, permF0);
    vfc = vec_perm(l0, l1, permF1);

    vbb = vec_ld(0, pb);	 /* use vbb & vbc as temp for vb & vba */
    vbc = vec_ld(16, pb);
    l0 = vec_ld(0, pby);
    l1 = vec_ld(16, pby);

    /* (unsigned short[]) pf[0-7] */    
    fH = vu16(vec_mergeh(zero, vf));
			
    /* (unsigned short[]) pf[8-15] */   
    fL = vu16(vec_mergel(zero, vf));
			
    /* (unsigned short[]) pfa[0-7] */    
    tH = vu16(vec_mergeh(zero, vfa));
			
    /* (unsigned short[]) pfa[8-15] */   
    tL = vu16(vec_mergel(zero, vfa));

    /* pf[i] + pfa[i] */                                                 
    fH = vec_add(fH, tH);                                               
    fL = vec_add(fL, tL);                                               

    /* (unsigned short[]) pfb[0-7] */  
    tH = vu16(vec_mergeh(zero, vfb));

    /* (unsigned short[]) pfb[8-15] */ 
    tL = vu16(vec_mergel(zero, vfb));

    /* (pf[i]+pfa[i]) + pfb[i] */                                       
    fH = vec_add(fH, tH);                                                
    fL = vec_add(fL, tL);                                                

    /* (unsigned short[]) pfc[0-7] */  
    tH = vu16(vec_mergeh(zero, vfc));
			
    /* (unsigned short[]) pfc[8-15] */ 
    tL = vu16(vec_mergel(zero, vfc));

    /* (pf[i]+pfa[i]+pfb[i]) + pfc[i] */
    fH = vec_add(fH, tH);                                                
    fL = vec_add(fL, tL);                                                

    /* (pf[i]+pfa[i]+pfb[i]+pfc[i]) + 2 */
    fH = vec_add(fH, two);
    fL = vec_add(fL, two);

    /* (pf[i]+pfa[i]+pfb[i]+pfc[i]+2) >> 2 */
    fH = vec_sra(fH, two);
    fL = vec_sra(fL, two);

    lR = vec_ld(0, p2);

    vb = vec_perm(vbb, vbc, permB0);
    vba = vec_perm(vbb, vbc, permB1);
    vbb = vec_perm(l0, l1, permB0);
    vbc = vec_perm(l0, l1, permB1);

    /* (unsigned short[]) pb[0-7] */    
    bH = vu16(vec_mergeh(zero, vb));
			
    /* (unsigned short[]) pb[8-15] */   
    bL = vu16(vec_mergel(zero, vb));

    /* (unsigned short[]) pba[0-7] */
    tH = vu16(vec_mergeh(zero, vba));

    /* (unsigned short[]) pba[8-15] */   
    tL = vu16(vec_mergel(zero, vba));

    /* pb[i] + pba[i] */                                                 
    bH = vec_add(bH, tH);                                               
    bL = vec_add(bL, tL);                                               

    /* (unsigned short[]) pbb[0-7] */  
    tH = vu16(vec_mergeh(zero, vbb));

    /* (unsigned short[]) pbb[8-15] */ 
    tL = vu16(vec_mergel(zero, vbb));

    /* (pb[i]+pba[i]) + pbb[i] */                                       
    bH = vec_add(bH, tH);                                                
    bL = vec_add(bL, tL);                                                
			
    /* (unsigned short[]) pbc[0-7] */  
    tH = vu16(vec_mergeh(zero, vbc));
			
    /* (unsigned short[]) pbc[8-15] */ 
    tL = vu16(vec_mergel(zero, vbc));

    /* (pb[i]+pba[i]+pbb[i]) + pbc[i] */
    bH = vec_add(bH, tH);                                                
    bL = vec_add(bL, tL);                                                

							
    /* (pb[i]+pba[i]+pbb[i]+pbc[i]) + 2 */
    bH = vec_add(bH, two);                                                
    bL = vec_add(bL, two);                                                
							
    /* (pb[i]+pba[i]+pbb[i]+pbc[i]+2) >> 2 */
    bH = vec_sra(bH, two);                                                
    bL = vec_sra(bL, two);                                                

    /* ((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2) +
     * ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)
     */
    tH = vec_add(fH, bH);                                                
    tL = vec_add(fL, bL);                                                

    /* (((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
     *  ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)) + 1
     */
    tH = vec_add(tH, one);                                                
    tL = vec_add(tL, one);

    /* (((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
     *  ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)+1) >> 1
     */
    tH = vec_sra(tH, one);                                                
    tL = vec_sra(tL, one);                                                

    /* absolute value increases parallelism (x16 instead of x8)
     * since a bit isn't lost on the sign.
     * 
     * d = abs( ((((pf[i]+pfa[i]+pfb[i]+pfc[i]+2)>>2)+
     *            ((pb[i]+pba[i]+pbb[i]+pbc[i]+2)>>2)+1)>>1) - p2[i] )
     */
    tH = vu16(vec_packsu(tH, tL));
    min = vec_min(vu8(tH), lR);                                           
    max = vec_max(vu8(tH), lR);                                           
    dif = vec_sub(max, min);                                              

    /* sum += (d * d) */                                                   
    sum = vec_msum(dif, dif, sum);                                        

    /* sum all parts of difference into one 32 bit quantity */
    vo.v = vec_sums(vs32(sum), vs32(zero));

    AMBER_STOP;
    return vo.s.sum;
}
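
bsumsq accumulates the squared error between a bidirectional prediction and the reference block p2: the forward and backward references are each half-pel interpolated with the +2 >> 2 four-tap average (a plain copy when the half-pel flags are zero), the two predictions are averaged with +1 >> 1, and the squared differences are summed via vec_msum/vec_sums. A scalar sketch of one block, with parameter names assumed from BSUMSQ_PDECL; the vector version always processes a 16-pixel-wide block.

#include <stdint.h>

/* Scalar sketch of the per-pixel arithmetic above; hxf/hyf and hxb/hyb
 * are the forward and backward half-pel flags (0 or 1). */
static int bsumsq_scalar(const uint8_t *pf, const uint8_t *pb,
                         const uint8_t *p2, int rowstride,
                         int hxf, int hyf, int hxb, int hyb,
                         int h, int width)
{
    int sum = 0;
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < width; x++) {
            int f = (pf[x] + pf[x + hxf] + pf[x + hyf * rowstride] +
                     pf[x + hyf * rowstride + hxf] + 2) >> 2;
            int b = (pb[x] + pb[x + hxb] + pb[x + hyb * rowstride] +
                     pb[x + hyb * rowstride + hxb] + 2) >> 2;
            int d = ((f + b + 1) >> 1) - p2[x];
            sum += d * d;
        }
        pf += rowstride; pb += rowstride; p2 += rowstride;
    }
    return sum;
}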
Example #12
0
void test1() {
// CHECK-LABEL: define void @test1
// CHECK-LE-LABEL: define void @test1

  res_vf = vec_abs(vf);
// CHECK: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{[0-9]*}})

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_add(vd, vd);
// CHECK: fadd <2 x double>
// CHECK-LE: fadd <2 x double>

  res_vd = vec_and(vbll, vd);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  res_vd = vec_and(vd, vbll);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  res_vd = vec_and(vd, vd);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_andc(vbll, vd);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_andc(vd, vbll);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()

  res_vd = vec_andc(vd, vd);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_ceil(vd);
// CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})

  res_vf = vec_ceil(vf);
// CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})

  res_vbll = vec_cmpeq(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmpeq(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  res_vbll = vec_cmpge(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmpge(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  res_vbll = vec_cmpgt(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmpgt(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  res_vbll = vec_cmple(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmple(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  res_vbll = vec_cmplt(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmplt(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  /* vec_cpsgn */
  res_vf = vec_cpsgn(vf, vf);
// CHECK: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})
// CHECK-LE: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})

  res_vd = vec_cpsgn(vd, vd);
// CHECK: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})
// CHECK-LE: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})

  /* vec_div */
  res_vsll = vec_div(vsll, vsll);
// CHECK: sdiv <2 x i64>
// CHECK-LE: sdiv <2 x i64>

  res_vull = vec_div(vull, vull);
// CHECK: udiv <2 x i64>
// CHECK-LE: udiv <2 x i64>

  res_vf = vec_div(vf, vf);
// CHECK: fdiv <4 x float>
// CHECK-LE: fdiv <4 x float>

  res_vd = vec_div(vd, vd);
// CHECK: fdiv <2 x double>
// CHECK-LE: fdiv <2 x double>

  /* vec_max */
  res_vf = vec_max(vf, vf);
// CHECK: @llvm.ppc.vsx.xvmaxsp
// CHECK-LE: @llvm.ppc.vsx.xvmaxsp

  res_vd = vec_max(vd, vd);
// CHECK: @llvm.ppc.vsx.xvmaxdp
// CHECK-LE: @llvm.ppc.vsx.xvmaxdp

  res_vf = vec_vmaxfp(vf, vf);
// CHECK: @llvm.ppc.vsx.xvmaxsp
// CHECK-LE: @llvm.ppc.vsx.xvmaxsp

  /* vec_min */
  res_vf = vec_min(vf, vf);
// CHECK: @llvm.ppc.vsx.xvminsp
// CHECK-LE: @llvm.ppc.vsx.xvminsp

  res_vd = vec_min(vd, vd);
// CHECK: @llvm.ppc.vsx.xvmindp
// CHECK-LE: @llvm.ppc.vsx.xvmindp

  res_vf = vec_vminfp(vf, vf);
// CHECK: @llvm.ppc.vsx.xvminsp
// CHECK-LE: @llvm.ppc.vsx.xvminsp

  res_d = __builtin_vsx_xsmaxdp(d, d);
// CHECK: @llvm.ppc.vsx.xsmaxdp
// CHECK-LE: @llvm.ppc.vsx.xsmaxdp

  res_d = __builtin_vsx_xsmindp(d, d);
// CHECK: @llvm.ppc.vsx.xsmindp
// CHECK-LE: @llvm.ppc.vsx.xsmindp

  /* vec_perm */
  res_vsll = vec_perm(vsll, vsll, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_perm(vull, vull, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vbll = vec_perm(vbll, vbll, vuc);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vf = vec_round(vf);
// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float>
// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float>

  res_vd = vec_round(vd);
// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double>
// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double>

  res_vd = vec_perm(vd, vd, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vd = vec_splat(vd, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vbll = vec_splat(vbll, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vsll =  vec_splat(vsll, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vull =  vec_splat(vull, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vsi = vec_pack(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vui = vec_pack(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vbi = vec_pack(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_vperm(vsll, vsll, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_vperm(vull, vull, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vd = vec_vperm(vd, vd, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  /* vec_vsx_ld */

  res_vbi = vec_vsx_ld(0, &vbi);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vsi = vec_vsx_ld(0, &vsi);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vsi = vec_vsx_ld(0, asi);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vui = vec_vsx_ld(0, &vui);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vui = vec_vsx_ld(0, aui);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vf = vec_vsx_ld (0, &vf);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vf = vec_vsx_ld (0, af);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vsll = vec_vsx_ld(0, &vsll);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vull = vec_vsx_ld(0, &vull);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vd = vec_vsx_ld(0, &vd);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vd = vec_vsx_ld(0, ad);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vbs = vec_vsx_ld(0, &vbs);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vss = vec_vsx_ld(0, &vss);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vss = vec_vsx_ld(0, ass);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vus = vec_vsx_ld(0, &vus);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vus = vec_vsx_ld(0, aus);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vbc = vec_vsx_ld(0, &vbc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vsc = vec_vsx_ld(0, &vsc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vuc = vec_vsx_ld(0, &vuc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vsc = vec_vsx_ld(0, asc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vuc = vec_vsx_ld(0, auc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  /* vec_vsx_st */

  vec_vsx_st(vbi, 0, &res_vbi);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbi, 0, res_aui);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbi, 0, res_asi);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsi, 0, &res_vsi);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsi, 0, res_asi);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vui, 0, &res_vui);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vui, 0, res_aui);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vf, 0, &res_vf);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vf, 0, res_af);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsll, 0, &res_vsll);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  vec_vsx_st(vull, 0, &res_vull);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  vec_vsx_st(vd, 0, &res_vd);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  vec_vsx_st(vd, 0, res_ad);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  vec_vsx_st(vbs, 0, &res_vbs);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbs, 0, res_aus);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbs, 0, res_ass);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vss, 0, &res_vss);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vss, 0, res_ass);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vus, 0, &res_vus);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vus, 0, res_aus);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsc, 0, &res_vsc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsc, 0, res_asc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vuc, 0, &res_vuc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vuc, 0, res_auc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbc, 0, &res_vbc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbc, 0, res_asc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbc, 0, res_auc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  /* vec_and */
  res_vsll = vec_and(vsll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_and(vbll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_and(vsll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_and(vull, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_and(vbll, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_and(vull, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vbll = vec_and(vbll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  /* vec_vand */
  res_vsll = vec_vand(vsll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_vand(vbll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_vand(vsll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_vand(vull, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_vand(vbll, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_vand(vull, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vbll = vec_vand(vbll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  /* vec_andc */
  res_vsll = vec_andc(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_andc(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_andc(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_andc(vull, vull);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_andc(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_andc(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vbll = vec_andc(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vf = vec_floor(vf);
// CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_floor(vd);
// CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})

  res_vf = vec_madd(vf, vf, vf);
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})

  res_vd = vec_madd(vd, vd, vd);
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})

  /* vec_mergeh */
  res_vsll = vec_mergeh(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergeh(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergeh(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergeh(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergeh(vull, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergeh(vbll, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  /* vec_mergel */
  res_vsll = vec_mergel(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergel(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergel(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergel(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergel(vull, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergel(vbll, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  /* vec_msub */
  res_vf = vec_msub(vf, vf, vf);
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>

  res_vd = vec_msub(vd, vd, vd);
// CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>

  res_vsll = vec_mul(vsll, vsll);
// CHECK: mul <2 x i64>
// CHECK-LE: mul <2 x i64>

  res_vull = vec_mul(vull, vull);
// CHECK: mul <2 x i64>
// CHECK-LE: mul <2 x i64>

  res_vf = vec_mul(vf, vf);
// CHECK: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}

  res_vd = vec_mul(vd, vd);
// CHECK: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}

  res_vf = vec_nearbyint(vf);
// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_nearbyint(vd);
// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})

  res_vf = vec_nmadd(vf, vf, vf);
// CHECK: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
// CHECK-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]
// CHECK-LE: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
// CHECK-LE-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]

  res_vd = vec_nmadd(vd, vd, vd);
// CHECK: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
// CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
// CHECK-LE: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]

  res_vf = vec_nmsub(vf, vf, vf);
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}

  res_vd = vec_nmsub(vd, vd, vd);
// CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
// CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]

  /* vec_nor */
  res_vsll = vec_nor(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
// CHECK-LE: or <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_nor(vull, vull);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
// CHECK-LE: or <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_nor(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
// CHECK-LE: or <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vd = vec_nor(vd, vd);
// CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>
// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>

  /* vec_or */
  res_vsll = vec_or(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_or(vbll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_or(vsll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_or(vull, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_or(vbll, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_or(vull, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vbll = vec_or(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vd = vec_or(vd, vd);
// CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}

  res_vd = vec_or(vbll, vd);
// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>

  res_vd = vec_or(vd, vbll);
// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>

  res_vf = vec_re(vf);
// CHECK: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>
// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>

  res_vd = vec_re(vd);
// CHECK: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>

  res_vf = vec_rint(vf);
// CHECK: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_rint(vd);
// CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})

  res_vf = vec_rsqrte(vf);
// CHECK: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})

  res_vd = vec_rsqrte(vd);
// CHECK: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vf = vec_sel(vd, vd, vbll);
// CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64> %{{[0-9]+}},
// CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: or <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64> %{{[0-9]+}},
// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: or <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_sel(vd, vd, vull);
// CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64> %{{[0-9]+}},
// CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: or <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64> %{{[0-9]+}},
// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: or <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>

  res_vf = vec_sqrt(vf);
// CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_sqrt(vd);
// CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})

  res_vd = vec_sub(vd, vd);
// CHECK: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}

  res_vf = vec_trunc(vf);
// CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_trunc(vd);
// CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})

  /* vec_vor */
  res_vsll = vec_vor(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_vor(vbll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_vor(vsll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_vor(vull, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_vor(vbll, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_vor(vull, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vbll = vec_vor(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  /* vec_xor */
  res_vsll = vec_xor(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_xor(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_xor(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_xor(vull, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_xor(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_xor(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vbll = vec_xor(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_xor(vd, vd);
// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_xor(vd, vbll);
// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_xor(vbll, vd);
// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>

  /* vec_vxor */
  res_vsll = vec_vxor(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_vxor(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_vxor(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_vxor(vull, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_vxor(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_vxor(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vbll = vec_vxor(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_cts(vd, 0);
// CHECK: fmul <2 x double>
// CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
// CHECK-LE: fmul <2 x double>
// CHECK-LE: fptosi <2 x double> %{{.*}} to <2 x i64>

  res_vsll = vec_cts(vd, 31);
// CHECK: fmul <2 x double>
// CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
// CHECK-LE: fmul <2 x double>
// CHECK-LE: fptosi <2 x double> %{{.*}} to <2 x i64>

  res_vsll = vec_ctu(vd, 0);
// CHECK: fmul <2 x double>
// CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
// CHECK-LE: fmul <2 x double>
// CHECK-LE: fptoui <2 x double> %{{.*}} to <2 x i64>

  res_vsll = vec_ctu(vd, 31);
// CHECK: fmul <2 x double>
// CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
// CHECK-LE: fmul <2 x double>
// CHECK-LE: fptoui <2 x double> %{{.*}} to <2 x i64>

  res_vd = vec_ctf(vsll, 0);
// CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: fmul <2 x double>
// CHECK-LE: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>

  res_vd = vec_ctf(vsll, 31);
// CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: fmul <2 x double>
// CHECK-LE: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>

  res_vd = vec_ctf(vull, 0);
// CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: fmul <2 x double>
// CHECK-LE: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>

  res_vd = vec_ctf(vull, 31);
// CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: fmul <2 x double>
// CHECK-LE: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>
}
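
The xor/and/or sequences expected by the vec_andc and vec_sel checks above are plain bitwise identities; a scalar sketch of them follows (helper names are illustrative and not part of the test).

/* Scalar sketch of the bitwise identities behind the andc and sel
 * CHECK lines above; the helper names are illustrative only. */
#include <stdint.h>

static uint64_t scalar_andc(uint64_t a, uint64_t b)
{
    return a & ~b;                      /* xor with -1, then and */
}

static uint64_t scalar_sel(uint64_t a, uint64_t b, uint64_t mask)
{
    return (a & ~mask) | (b & mask);    /* xor, two ands, then or */
}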
Example #13
0
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t *dst,
                                                 const uint8_t *src,
                                                 int dstStride, int srcStride)
{
    register int i;

    LOAD_ZERO;
    const vec_u8 permM2 = vec_lvsl(-2, src);
    const vec_u8 permM1 = vec_lvsl(-1, src);
    const vec_u8 permP0 = vec_lvsl(+0, src);
    const vec_u8 permP1 = vec_lvsl(+1, src);
    const vec_u8 permP2 = vec_lvsl(+2, src);
    const vec_u8 permP3 = vec_lvsl(+3, src);
    const vec_s16 v5ss = vec_splat_s16(5);
    const vec_u16 v5us = vec_splat_u16(5);
    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

    vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;

    register int align = ((((unsigned long)src) - 2) % 16);

    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
              srcP2A, srcP2B, srcP3A, srcP3B,
              srcM1A, srcM1B, srcM2A, srcM2B,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
              pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
              psumA, psumB, sumA, sumB;

    vec_u8 sum, fsum;

    for (i = 0 ; i < 16 ; i ++) {
        vec_u8 srcR1 = vec_ld(-2, src);
        vec_u8 srcR2 = vec_ld(14, src);

        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
        srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
        srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
        srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);

        srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
        srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
        srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
        srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);

        srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
        srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
        srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
        srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);

        sum1A = vec_adds(srcP0A, srcP1A);
        sum1B = vec_adds(srcP0B, srcP1B);
        sum2A = vec_adds(srcM1A, srcP2A);
        sum2B = vec_adds(srcM1B, srcP2B);
        sum3A = vec_adds(srcM2A, srcP3A);
        sum3B = vec_adds(srcM2B, srcP3B);

        pp1A = vec_mladd(sum1A, v20ss, v16ss);
        pp1B = vec_mladd(sum1B, v20ss, v16ss);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        pp3A = vec_add(sum3A, pp1A);
        pp3B = vec_add(sum3B, pp1B);

        psumA = vec_sub(pp3A, pp2A);
        psumB = vec_sub(pp3B, pp2B);

        sumA = vec_sra(psumA, v5us);
        sumB = vec_sra(psumB, v5us);

        sum = vec_packsu(sumA, sumB);

        ASSERT_ALIGNED(dst);

        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));

        vec_st(fsum, 0, dst);

        src += srcStride;
        dst += dstStride;
    }
}
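
For reference, the loop above evaluates the H.264 horizontal 6-tap luma filter. A scalar sketch of the per-pixel formula (it ignores the 16-bit saturation of the vector adds and the OP_U8_ALTIVEC put/avg step; not the FFmpeg C fallback):

/* Per-pixel sketch of the horizontal lowpass computed above:
 *   t   = src[-2] - 5*src[-1] + 20*src[0] + 20*src[1] - 5*src[2] + src[3]
 *   dst = clip_uint8((t + 16) >> 5)
 * which matches sum1*20+16, sum2*5, sum3, the subtract, the shift by 5
 * and the unsigned-saturating pack in the vector code. */
#include <stdint.h>

static uint8_t h_lowpass_ref(const uint8_t *src)
{
    int t = src[-2] - 5*src[-1] + 20*src[0] + 20*src[1] - 5*src[2] + src[3];
    t = (t + 16) >> 5;
    if (t < 0)   t = 0;
    if (t > 255) t = 255;
    return (uint8_t)t;
}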
Example #14
0
static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t *dst, int16_t *tmp,
                                                  const uint8_t *src,
                                                  int dstStride, int tmpStride,
                                                  int srcStride)
{
    register int i;
    LOAD_ZERO;
    const vec_u8 permM2 = vec_lvsl(-2, src);
    const vec_u8 permM1 = vec_lvsl(-1, src);
    const vec_u8 permP0 = vec_lvsl(+0, src);
    const vec_u8 permP1 = vec_lvsl(+1, src);
    const vec_u8 permP2 = vec_lvsl(+2, src);
    const vec_u8 permP3 = vec_lvsl(+3, src);
    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_u32 v10ui = vec_splat_u32(10);
    const vec_s16 v5ss = vec_splat_s16(5);
    const vec_s16 v1ss = vec_splat_s16(1);
    const vec_s32 v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
    const vec_u32 v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));

    register int align = ((((unsigned long)src) - 2) % 16);

    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
              srcP2A, srcP2B, srcP3A, srcP3B,
              srcM1A, srcM1B, srcM2A, srcM2B,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
              pp1A, pp1B, pp2A, pp2B, psumA, psumB;

    const vec_u8 mperm = (const vec_u8)
        {0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
         0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F};
    int16_t *tmpbis = tmp;

    vec_s16 tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
              tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
              tmpP2ssA, tmpP2ssB;

    vec_s32 pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
              pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
              pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
              ssumAe, ssumAo, ssumBe, ssumBo;
    vec_u8 fsum, sumv, sum;
    vec_s16 ssume, ssumo;

    src -= (2 * srcStride);
    for (i = 0 ; i < 21 ; i ++) {
        vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
        vec_u8 srcR1 = vec_ld(-2, src);
        vec_u8 srcR2 = vec_ld(14, src);

        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vec_u8 srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
        srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
        srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
        srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);

        srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
        srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
        srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
        srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);

        srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
        srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
        srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
        srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);

        sum1A = vec_adds(srcP0A, srcP1A);
        sum1B = vec_adds(srcP0B, srcP1B);
        sum2A = vec_adds(srcM1A, srcP2A);
        sum2B = vec_adds(srcM1B, srcP2B);
        sum3A = vec_adds(srcM2A, srcP3A);
        sum3B = vec_adds(srcM2B, srcP3B);

        pp1A = vec_mladd(sum1A, v20ss, sum3A);
        pp1B = vec_mladd(sum1B, v20ss, sum3B);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        psumA = vec_sub(pp1A, pp2A);
        psumB = vec_sub(pp1B, pp2B);

        vec_st(psumA, 0, tmp);
        vec_st(psumB, 16, tmp);

        src += srcStride;
        tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
    }

    tmpM2ssA = vec_ld(0, tmpbis);
    tmpM2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpM1ssA = vec_ld(0, tmpbis);
    tmpM1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP0ssA = vec_ld(0, tmpbis);
    tmpP0ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP1ssA = vec_ld(0, tmpbis);
    tmpP1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP2ssA = vec_ld(0, tmpbis);
    tmpP2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;

    for (i = 0 ; i < 16 ; i++) {
        const vec_s16 tmpP3ssA = vec_ld(0, tmpbis);
        const vec_s16 tmpP3ssB = vec_ld(16, tmpbis);

        const vec_s16 sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
        const vec_s16 sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
        const vec_s16 sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
        const vec_s16 sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
        const vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
        const vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);

        tmpbis += tmpStride;

        tmpM2ssA = tmpM1ssA;
        tmpM2ssB = tmpM1ssB;
        tmpM1ssA = tmpP0ssA;
        tmpM1ssB = tmpP0ssB;
        tmpP0ssA = tmpP1ssA;
        tmpP0ssB = tmpP1ssB;
        tmpP1ssA = tmpP2ssA;
        tmpP1ssB = tmpP2ssB;
        tmpP2ssA = tmpP3ssA;
        tmpP2ssB = tmpP3ssB;

        pp1Ae = vec_mule(sum1A, v20ss);
        pp1Ao = vec_mulo(sum1A, v20ss);
        pp1Be = vec_mule(sum1B, v20ss);
        pp1Bo = vec_mulo(sum1B, v20ss);

        pp2Ae = vec_mule(sum2A, v5ss);
        pp2Ao = vec_mulo(sum2A, v5ss);
        pp2Be = vec_mule(sum2B, v5ss);
        pp2Bo = vec_mulo(sum2B, v5ss);

        pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
        pp3Ao = vec_mulo(sum3A, v1ss);
        pp3Be = vec_sra((vec_s32)sum3B, v16ui);
        pp3Bo = vec_mulo(sum3B, v1ss);

        pp1cAe = vec_add(pp1Ae, v512si);
        pp1cAo = vec_add(pp1Ao, v512si);
        pp1cBe = vec_add(pp1Be, v512si);
        pp1cBo = vec_add(pp1Bo, v512si);

        pp32Ae = vec_sub(pp3Ae, pp2Ae);
        pp32Ao = vec_sub(pp3Ao, pp2Ao);
        pp32Be = vec_sub(pp3Be, pp2Be);
        pp32Bo = vec_sub(pp3Bo, pp2Bo);

        sumAe = vec_add(pp1cAe, pp32Ae);
        sumAo = vec_add(pp1cAo, pp32Ao);
        sumBe = vec_add(pp1cBe, pp32Be);
        sumBo = vec_add(pp1cBo, pp32Bo);

        ssumAe = vec_sra(sumAe, v10ui);
        ssumAo = vec_sra(sumAo, v10ui);
        ssumBe = vec_sra(sumBe, v10ui);
        ssumBo = vec_sra(sumBo, v10ui);

        ssume = vec_packs(ssumAe, ssumBe);
        ssumo = vec_packs(ssumAo, ssumBo);

        sumv = vec_packsu(ssume, ssumo);
        sum = vec_perm(sumv, sumv, mperm);

        ASSERT_ALIGNED(dst);

        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));

        vec_st(fsum, 0, dst);

        dst += dstStride;
    }
}
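
The HV variant runs the same 6-tap kernel twice: the first loop stores unrounded horizontal sums in tmp, the second filters those sums vertically and rounds once with (x + 512) >> 10, which is what v512si and v10ui implement. A rough scalar sketch, assuming tmp holds the first-pass sums:

/* Sketch only, not the FFmpeg code: second-pass vertical filter over the
 * 16-bit horizontal sums, with a single rounding at the end. */
#include <stdint.h>

static int filter6_16(const int16_t *p, int stride)
{
    return p[-2*stride] - 5*p[-stride] + 20*p[0]
         + 20*p[stride] - 5*p[2*stride] + p[3*stride];
}

static uint8_t hv_lowpass_ref(const int16_t *tmp_col, int tmp_stride)
{
    int t = (filter6_16(tmp_col, tmp_stride) + 512) >> 10;
    if (t < 0)   t = 0;
    if (t > 255) t = 255;
    return (uint8_t)t;
}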
Example #15
0
void vec_high_v2df (vector double *a, vector double *b, vector double *c)
{
  *a = vec_mergeh (*b, *c);
}
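
A scalar sketch of what the merge-high produces for two-element double vectors (the function name below is illustrative):

/* vec_mergeh(b, c) on V2DF yields { b[0], c[0] }; the compilers preserve
 * this array-index semantic on both endiannesses. Sketch only. */
void vec_high_v2df_ref (double a[2], const double b[2], const double c[2])
{
  a[0] = b[0];
  a[1] = c[0];
}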
Example #16
0
/** Do inverse transform on 8x4 part of block */
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, DCTELEM *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector  signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

#define ADD(dest,src,perm)                                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm);  \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);

    ADD (dest, src0, perm0)      dest += stride;
    ADD (dest, src1, perm1)      dest += stride;
    ADD (dest, src2, perm0)      dest += stride;
    ADD (dest, src3, perm1)
}
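
The vec_sl(vec_splat_*(), vec_splat_u*()) pattern used for vec_64 above (and for v20ss, v16ss and v512si in the earlier examples) exists because the splat-immediate intrinsics only accept values in -16..15; larger constants are built by splatting a small value and shifting it. A minimal sketch:

/* Splat-and-shift constant idiom: 64 = 4 << 4, 20 = 5 << 2. */
#include <altivec.h>

vector signed int splat_s32_64(void)
{
    return vec_sl(vec_splat_s32(4), vec_splat_u32(4));  /* {64, 64, 64, 64} */
}

vector signed short splat_s16_20(void)
{
    return vec_sl(vec_splat_s16(5), vec_splat_u16(2));  /* eight... sixteen bytes, eight 20s */
}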
Example #17
0
void vec_high_v2di (vector long long *a, vector long long *b, vector long long *c)
{
  *a = vec_mergeh (*b, *c);
}
Example #18
0
void x264_zigzag_scan_8x8_frame_altivec( int16_t level[64], int16_t dct[64] )
{
    vec_s16_t tmpv[6];
    vec_s16_t dct0v = vec_ld( 0*16, dct );
    vec_s16_t dct1v = vec_ld( 1*16, dct );
    vec_s16_t dct2v = vec_ld( 2*16, dct );
    vec_s16_t dct3v = vec_ld( 3*16, dct );
    vec_s16_t dct4v = vec_ld( 4*16, dct );
    vec_s16_t dct5v = vec_ld( 5*16, dct );
    vec_s16_t dct6v = vec_ld( 6*16, dct );
    vec_s16_t dct7v = vec_ld( 7*16, dct );

    const vec_u8_t mask1[14] = {
        { 0x00, 0x01, 0x02, 0x03, 0x12, 0x13, 0x14, 0x15, 0x0A, 0x0B, 0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D },
        { 0x0A, 0x0B, 0x0C, 0x0D, 0x00, 0x00, 0x0E, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x12, 0x13 },
        { 0x00, 0x01, 0x02, 0x03, 0x18, 0x19, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x00, 0x00, 0x14, 0x15, 0x18, 0x19, 0x02, 0x03, 0x04, 0x05, 0x08, 0x09, 0x06, 0x07, 0x12, 0x13 },
        { 0x12, 0x13, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x1A, 0x1B, 0x10, 0x11, 0x08, 0x09, 0x04, 0x05, 0x02, 0x03, 0x0C, 0x0D, 0x14, 0x15, 0x18, 0x19 },
        { 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x0A, 0x0B },
        { 0x00, 0x01, 0x02, 0x03, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x06, 0x07, 0x04, 0x05, 0x08, 0x09 },
        { 0x00, 0x11, 0x16, 0x17, 0x18, 0x19, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x1A, 0x1B },
        { 0x02, 0x03, 0x18, 0x19, 0x16, 0x17, 0x1A, 0x1B, 0x1C, 0x1D, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09 },
        { 0x08, 0x09, 0x0A, 0x0B, 0x06, 0x07, 0x0E, 0x0F, 0x10, 0x11, 0x00, 0x00, 0x12, 0x13, 0x14, 0x15 },
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x08, 0x09, 0x06, 0x07, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F }
    };

    tmpv[0] = vec_mergeh( dct0v, dct1v );
    tmpv[1] = vec_mergeh( dct2v, dct3v );
    tmpv[2] = (vec_s16_t)vec_mergeh( (vec_s32_t)tmpv[0], (vec_s32_t)tmpv[1] );
    tmpv[3] = vec_perm( tmpv[2], dct0v, mask1[0] );
    vec_st( tmpv[3], 0*16, level );

    tmpv[4] = vec_mergeh( dct4v, dct5v );
    tmpv[3] = vec_perm( tmpv[0], tmpv[4], mask1[1] );
    tmpv[3] = vec_perm( tmpv[3], dct0v, mask1[2] );
    tmpv[3] = vec_perm( tmpv[3], tmpv[1], mask1[3] );
    vec_st( tmpv[3], 1*16, level );

    tmpv[3] = vec_mergel( dct0v, dct1v );
    tmpv[1] = vec_mergel( tmpv[1], dct2v );
    tmpv[5] = vec_perm( tmpv[3], tmpv[1], mask1[4] );
    tmpv[5] = vec_perm( tmpv[5], dct4v, mask1[5] );
    vec_st( tmpv[5], 2*16, level );

    tmpv[2] = vec_mergeh( dct5v, dct6v );
    tmpv[5] = vec_mergeh( tmpv[2], dct7v );
    tmpv[4] = vec_mergel( tmpv[4], tmpv[1] );
    tmpv[0] = vec_perm( tmpv[5], tmpv[4], mask1[6] );
    vec_st( tmpv[0], 3*16, level );

    tmpv[1] = vec_mergel( dct2v, dct3v );
    tmpv[0] = vec_mergel( dct4v, dct5v );
    tmpv[4] = vec_perm( tmpv[1], tmpv[0], mask1[7] );
    tmpv[3] = vec_perm( tmpv[4], tmpv[3], mask1[8] );
    vec_st( tmpv[3], 4*16, level );

    tmpv[3] = vec_mergeh( dct6v, dct7v );
    tmpv[2] = vec_mergel( dct3v, dct4v );
    tmpv[2] = vec_perm( tmpv[2], dct5v, mask1[9] );
    tmpv[3] = vec_perm( tmpv[2], tmpv[3], mask1[10] );
    vec_st( tmpv[3], 5*16, level );

    tmpv[1] = vec_mergel( tmpv[1], tmpv[2] );
    tmpv[2] = vec_mergel( dct6v, dct7v );
    tmpv[1] = vec_perm( tmpv[1], tmpv[2], mask1[11] );
    tmpv[1] = vec_perm( tmpv[1], dct7v, mask1[12] );
    vec_st( tmpv[1], 6*16, level );

    tmpv[2] = vec_perm( tmpv[2], tmpv[0], mask1[13] );
    vec_st( tmpv[2], 7*16, level );
}
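
The mask1[] tables drive vec_perm, which indexes the 32-byte concatenation of its first two operands: mask bytes 0x00-0x0F select from the first vector, 0x10-0x1F from the second. A minimal sketch of that mechanism (the selector shown is illustrative, not one of the masks above):

/* vec_perm treats (a, b) as one 32-byte table; each selector byte picks
 * one byte from that table. Sketch: interleave the first 8 bytes. */
#include <altivec.h>

vector unsigned char interleave_first_bytes(vector unsigned char a,
                                            vector unsigned char b)
{
    const vector unsigned char sel = {
        0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
        0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17
    };
    return vec_perm(a, b, sel);   /* a[0], b[0], a[1], b[1], ... */
}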
Example #19
0
/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
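
A scalar reference sketch of what the loop above computes: a rounded half-pel average of the four neighbouring source pixels, then a rounding average (vec_avg) with the existing block. This is a sketch, not the FFmpeg C fallback:

/* Per-pixel sketch of avg_pixels8_xy2:
 *   t        = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 2) >> 2
 *   block[x] = (block[x] + t + 1) >> 1
 */
#include <stdint.h>
#include <stddef.h>

static void avg_pixels8_xy2_ref(uint8_t *block, const uint8_t *pixels,
                                ptrdiff_t line_size, int h)
{
    for (int i = 0; i < h; i++) {
        for (int x = 0; x < 8; x++) {
            int t = (pixels[x] + pixels[x + 1] +
                     pixels[x + line_size] + pixels[x + line_size + 1] + 2) >> 2;
            block[x] = (block[x] + t + 1) >> 1;
        }
        pixels += line_size;
        block  += line_size;
    }
}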
Example #20
0
void x264_zigzag_interleave_8x8_cavlc_altivec( int16_t *dst, int16_t *src, uint8_t *nnz )
{
    vec_s16_t tmpv[8];
    vec_s16_t merge[2];
    vec_s16_t permv[2];
    vec_s16_t orv[4];
    vec_s16_t src0v = vec_ld( 0*16, src );
    vec_s16_t src1v = vec_ld( 1*16, src );
    vec_s16_t src2v = vec_ld( 2*16, src );
    vec_s16_t src3v = vec_ld( 3*16, src );
    vec_s16_t src4v = vec_ld( 4*16, src );
    vec_s16_t src5v = vec_ld( 5*16, src );
    vec_s16_t src6v = vec_ld( 6*16, src );
    vec_s16_t src7v = vec_ld( 7*16, src );
    vec_u8_t pack;
    vec_u8_t nnzv = vec_vsx_ld( 0, nnz );
    vec_u8_t shift = vec_splat_u8( 7 );
    LOAD_ZERO;

    const vec_u8_t mask[3] = {
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 },
        { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F },
        { 0x10, 0x11, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x12, 0x13, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F }
    };

    tmpv[0] = vec_mergeh( src0v, src1v );
    tmpv[1] = vec_mergel( src0v, src1v );

    tmpv[2] = vec_mergeh( src2v, src3v );
    tmpv[3] = vec_mergel( src2v, src3v );

    tmpv[4] = vec_mergeh( src4v, src5v );
    tmpv[5] = vec_mergel( src4v, src5v );

    tmpv[6] = vec_mergeh( src6v, src7v );
    tmpv[7] = vec_mergel( src6v, src7v );

    merge[0] = vec_mergeh( tmpv[0], tmpv[1] );
    merge[1] = vec_mergeh( tmpv[2], tmpv[3] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[1] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 0*16, dst );

    merge[0] = vec_mergeh( tmpv[4], tmpv[5] );
    merge[1] = vec_mergeh( tmpv[6], tmpv[7] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[2] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 1*16, dst );
    vec_st( permv[1], 2*16, dst );
    vec_st( permv[2], 3*16, dst );

    merge[0] = vec_mergel( tmpv[0], tmpv[1] );
    merge[1] = vec_mergel( tmpv[2], tmpv[3] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[1] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 4*16, dst );

    merge[0] = vec_mergel( tmpv[4], tmpv[5] );
    merge[1] = vec_mergel( tmpv[6], tmpv[7] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[2] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 5*16, dst );
    vec_st( permv[1], 6*16, dst );
    vec_st( permv[2], 7*16, dst );

    orv[0] = vec_or( src0v, src1v );
    orv[1] = vec_or( src2v, src3v );
    orv[2] = vec_or( src4v, src5v );
    orv[3] = vec_or( src6v, src7v );

    permv[0] = vec_or( orv[0], orv[1] );
    permv[1] = vec_or( orv[2], orv[3] );
    permv[0] = vec_or( permv[0], permv[1] );

    permv[1] = vec_perm( permv[0], permv[0], mask[1] );
    permv[0] = vec_or( permv[0], permv[1] );

    pack = (vec_u8_t)vec_packs( permv[0], permv[0] );
    pack = (vec_u8_t)vec_cmpeq( pack, zerov );
    pack = vec_nor( pack, zerov );
    pack = vec_sr( pack, shift );
    nnzv = vec_perm( nnzv, pack, mask[2] );
    vec_st( nnzv, 0, nnz );
}
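For reference, the interleave plus non-zero flagging above boils down to a short scalar loop; the nnz scatter indices below mirror the mask[2] permute, which writes the four flags to nnz[0], nnz[1], nnz[8] and nnz[9]. This is a sketch, not the x264 C reference.

#include <stdint.h>

/* Scalar sketch: sub-block i collects the coefficients at positions
 * congruent to i (mod 4) and records whether any of them is non-zero. */
static void zigzag_interleave_8x8_cavlc_ref(int16_t *dst, const int16_t *src,
                                            uint8_t *nnz)
{
    for (int i = 0; i < 4; i++) {
        int nz = 0;
        for (int j = 0; j < 16; j++) {
            nz |= src[i + j * 4];
            dst[i * 16 + j] = src[i + j * 4];
        }
        nnz[(i & 1) + (i >> 1) * 8] = !!nz;  /* nnz[0], nnz[1], nnz[8], nnz[9] */
    }
}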
Example #21
0
void pix_subtract :: processYUV_Altivec(imageStruct &image, imageStruct &right)
{
    long h, w, width;

    width = image.xsize/8;
    //format is U Y V Y
    union
    {
        //unsigned int i;
        short elements[8];
        //vector signed char v;
        vector short v;
    } shortBuffer;

    union
    {
        //unsigned int i;
        unsigned char elements[16];
        //vector signed char v;
        vector unsigned char v;
    } charBuffer;

    //vector unsigned char c;
    vector signed short d, hiImage, loImage, YRight, UVRight, YImage, UVImage, UVTemp, YTemp;
    //vector unsigned char zero = vec_splat_u8(0);
    vector unsigned char c, one;
    //vector signed short zshort = vec_splat_s16(0);
    vector unsigned char *inData = (vector unsigned char*) image.data;
    vector unsigned char *rightData = (vector unsigned char*) right.data;

    //Write the pixel (pair) to the transfer buffer
    charBuffer.elements[0] = 2;
    charBuffer.elements[1] = 1;
    charBuffer.elements[2] = 2;
    charBuffer.elements[3] = 1;
    charBuffer.elements[4] = 2;
    charBuffer.elements[5] = 1;
    charBuffer.elements[6] = 2;
    charBuffer.elements[7] = 1;
    charBuffer.elements[8] = 2;
    charBuffer.elements[9] = 1;
    charBuffer.elements[10] = 2;
    charBuffer.elements[11] = 1;
    charBuffer.elements[12] = 2;
    charBuffer.elements[13] = 1;
    charBuffer.elements[14] = 2;
    charBuffer.elements[15] = 1;

    //Load it into the vector unit
    c = charBuffer.v;

    one =  vec_splat_u8( 1 );

    shortBuffer.elements[0] = 255;

    //Load it into the vector unit
    d = shortBuffer.v;
    d = (vector signed short)vec_splat((vector signed short)d,0);
#ifndef PPC970
    UInt32 prefetchSize = GetPrefetchConstant( 16, 1, 256 );
    vec_dst( inData, prefetchSize, 0 );
#endif
    for ( h=0; h<image.ysize; h++){
        for (w=0; w<width; w++)
        {
#ifndef PPC970
            vec_dst( inData, prefetchSize, 0 );
#endif
            //interleaved U Y V Y chars

            //vec_mule UV * 2 to short vector U V U V shorts
            UVImage = (vector signed short)vec_mule(one,inData[0]);
            UVRight = (vector signed short)vec_mule(c,rightData[0]);

            //vec_mulo Y * 1 to short vector Y Y Y Y shorts
            YImage = (vector signed short)vec_mulo(c,inData[0]);
            YRight = (vector signed short)vec_mulo(c,rightData[0]);

            //vec_subs UV - 255
            UVRight = (vector signed short)vec_subs(UVRight, d);

            //vec_subs UV
            UVTemp = vec_subs(UVImage,UVRight);

            //vec_subs Y
            YTemp = vec_subs(YImage,YRight);

            hiImage = vec_mergeh(UVTemp,YTemp);
            loImage = vec_mergel(UVTemp,YTemp);

            //vec_mergel + vec_mergeh Y and UV
            inData[0] = vec_packsu(hiImage, loImage);

            inData++;
            rightData++;
        }
#ifndef PPC970
        vec_dss( 0 );
#endif
    }  /*end of working altivec function */
}
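The arithmetic in the inner loop is easier to follow one UYVY byte pair at a time (even byte = chroma, odd byte = luma, assuming the big-endian vec_mule/vec_mulo element order this PowerPC code was written for). A hypothetical scalar mirror, with sat16() standing in for vec_subs' saturation and clamp255() for the final vec_packsu:

static int sat16(int v)    { return v < -32768 ? -32768 : v > 32767 ? 32767 : v; }
static int clamp255(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

/* One UYVY byte pair from each image: a = input image, b = right image. */
static void subtract_uyvy_pair(const unsigned char *a, const unsigned char *b,
                               unsigned char *out)
{
    int uv = sat16(a[0] - sat16(2 * b[0] - 255)); /* chroma: U or V */
    int y  = sat16(a[1] - b[1]);                  /* luma           */
    out[0] = (unsigned char)clamp255(uv);
    out[1] = (unsigned char)clamp255(y);
}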
Example #22
0
void v_expand_u8(vector unsigned char* a, vector unsigned short* b0, vector unsigned short* b1)
{
  *b0 = (vector unsigned short)vec_mergeh(*a, vec_splats((unsigned char)0));
  *b1 = (vector unsigned short)vec_mergel(*a, vec_splats((unsigned char)0));
}
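The intended semantics are plain zero-extension: 16 bytes become two vectors of eight 16-bit lanes (low half, then high half), as in the scalar sketch below. Whether merging with the zero vector as the second operand actually produces this depends on the target's element order; the intrinsic form above matches a little-endian layout. The same pattern reappears for the 16-to-32-bit expansion in a later example.

/* Scalar reference for the intended zero-extension. */
void v_expand_u8_ref(const unsigned char a[16],
                     unsigned short b0[8], unsigned short b1[8])
{
    for (int i = 0; i < 8; i++) {
        b0[i] = a[i];      /* low half  */
        b1[i] = a[i + 8];  /* high half */
    }
}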
Example #23
0
inline v_float64x2 v_cvt_f64(const v_float32x4& a)
{ return v_float64x2(vec_cvfo(vec_mergeh(a.val, a.val))); }
Example #24
0
void v_expand_u16(vector unsigned short* a, vector unsigned int* b0, vector unsigned int* b1)
{
    *b0 = (vector unsigned int)vec_mergeh(*a, vec_splats((unsigned short)0));
    *b1 = (vector unsigned int)vec_mergel(*a, vec_splats((unsigned short)0));
}
Example #25
0
File: dct.c Project: 0day-ci/gcc
inline void
transpose_vmx (vector signed short *input, vector signed short *output)
{
  vector signed short v0, v1, v2, v3, v4, v5, v6, v7;
  vector signed short x0, x1, x2, x3, x4, x5, x6, x7;

  /* Matrix transpose */
  v0 = vec_mergeh (input[0], input[4]);
  v1 = vec_mergel (input[0], input[4]);
  v2 = vec_mergeh (input[1], input[5]);
  v3 = vec_mergel (input[1], input[5]);
  v4 = vec_mergeh (input[2], input[6]);
  v5 = vec_mergel (input[2], input[6]);
  v6 = vec_mergeh (input[3], input[7]);
  v7 = vec_mergel (input[3], input[7]);

  x0 = vec_mergeh (v0, v4);
  x1 = vec_mergel (v0, v4);
  x2 = vec_mergeh (v1, v5);
  x3 = vec_mergel (v1, v5);
  x4 = vec_mergeh (v2, v6);
  x5 = vec_mergel (v2, v6);
  x6 = vec_mergeh (v3, v7);
  x7 = vec_mergel (v3, v7);

  output[0] = vec_mergeh (x0, x4);
  output[1] = vec_mergel (x0, x4);
  output[2] = vec_mergeh (x1, x5);
  output[3] = vec_mergel (x1, x5);
  output[4] = vec_mergeh (x2, x6);
  output[5] = vec_mergel (x2, x6);
  output[6] = vec_mergeh (x3, x7);
  output[7] = vec_mergel (x3, x7);
}
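The three rounds of vec_mergeh/vec_mergel above form the usual log2(8)-stage interleave network for an 8x8 transpose, where each input vector holds one row of eight int16 elements. The scalar equivalent is simply:

/* Scalar equivalent of the VMX merge network: out[c][r] = in[r][c]. */
void transpose_ref(const short in[8][8], short out[8][8])
{
    for (int r = 0; r < 8; r++)
        for (int c = 0; c < 8; c++)
            out[c][r] = in[r][c];
}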
Example #26
0
int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
{
    int i;
    int s __attribute__((aligned(16)));
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);
    
    s = 0;

    /*
       Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, as well
       as some splitting, and vector addition each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]	pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts
    */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);
    
    for(i=0;i<16;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]	pix3iv: pix3[1]-pix3[16]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /*
          Note that Altivec does have vec_avg, but this works on vector pairs
          and rounds up. We could do avg(avg(a,b),avg(c,d)), but the rounding
          would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
          Instead, we have to split the pixel vectors into vectors of shorts,
          and do the averaging by hand.
        */

        /* Split the pixel vectors into shorts */
        pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
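As the comments explain, the pixel vectors are split into shorts so the (a + b + c + d + 2) >> 2 rounding comes out right. A scalar sketch of the whole routine:

#include <stdint.h>
#include <stdlib.h>

/* Scalar sketch: SAD between pix1 and the rounded 2x2 (xy half-pel)
 * average of pix2, over a 16x16 block. */
int pix_abs16x16_xy2_ref(const uint8_t *pix1, const uint8_t *pix2, int line_size)
{
    int s = 0;
    for (int y = 0; y < 16; y++) {
        for (int x = 0; x < 16; x++) {
            int avg = (pix2[x] + pix2[x + 1] +
                       pix2[x + line_size] + pix2[x + line_size + 1] + 2) >> 2;
            s += abs(pix1[x] - avg);
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}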
Example #27
0
static int dct_quantize_altivec(MpegEncContext* s,
                         DCTELEM* data, int n,
                         int qscale, int* overflow)
{
    int lastNonZero;
    vector float row0, row1, row2, row3, row4, row5, row6, row7;
    vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
    const vector float zero = (const vector float)FOUROF(0.);
    // used after quantize step
    int oldBaseValue = 0;

    // Load the data into the row/alt vectors
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_ld(0, data);
        data1 = vec_ld(16, data);
        data2 = vec_ld(32, data);
        data3 = vec_ld(48, data);
        data4 = vec_ld(64, data);
        data5 = vec_ld(80, data);
        data6 = vec_ld(96, data);
        data7 = vec_ld(112, data);

        // Transpose the data before we start
        TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);

        // load the data into floating point vectors.  We load
        // the high half of each row into the main row vectors
        // and the low half into the alt vectors.
        row0 = vec_ctf(vec_unpackh(data0), 0);
        alt0 = vec_ctf(vec_unpackl(data0), 0);
        row1 = vec_ctf(vec_unpackh(data1), 0);
        alt1 = vec_ctf(vec_unpackl(data1), 0);
        row2 = vec_ctf(vec_unpackh(data2), 0);
        alt2 = vec_ctf(vec_unpackl(data2), 0);
        row3 = vec_ctf(vec_unpackh(data3), 0);
        alt3 = vec_ctf(vec_unpackl(data3), 0);
        row4 = vec_ctf(vec_unpackh(data4), 0);
        alt4 = vec_ctf(vec_unpackl(data4), 0);
        row5 = vec_ctf(vec_unpackh(data5), 0);
        alt5 = vec_ctf(vec_unpackl(data5), 0);
        row6 = vec_ctf(vec_unpackh(data6), 0);
        alt6 = vec_ctf(vec_unpackl(data6), 0);
        row7 = vec_ctf(vec_unpackh(data7), 0);
        alt7 = vec_ctf(vec_unpackl(data7), 0);
    }

    // The following block could exist as a separate altivec dct
    // function.  However, if we put it inline, the DCT data can remain
    // in the vector local variables, as floats, which we'll use during the
    // quantize step...
    {
        const vector float vec_0_298631336 = (vector float)FOUROF(0.298631336f);
        const vector float vec_0_390180644 = (vector float)FOUROF(-0.390180644f);
        const vector float vec_0_541196100 = (vector float)FOUROF(0.541196100f);
        const vector float vec_0_765366865 = (vector float)FOUROF(0.765366865f);
        const vector float vec_0_899976223 = (vector float)FOUROF(-0.899976223f);
        const vector float vec_1_175875602 = (vector float)FOUROF(1.175875602f);
        const vector float vec_1_501321110 = (vector float)FOUROF(1.501321110f);
        const vector float vec_1_847759065 = (vector float)FOUROF(-1.847759065f);
        const vector float vec_1_961570560 = (vector float)FOUROF(-1.961570560f);
        const vector float vec_2_053119869 = (vector float)FOUROF(2.053119869f);
        const vector float vec_2_562915447 = (vector float)FOUROF(-2.562915447f);
        const vector float vec_3_072711026 = (vector float)FOUROF(3.072711026f);


        int whichPass, whichHalf;

        for(whichPass = 1; whichPass<=2; whichPass++) {
            for(whichHalf = 1; whichHalf<=2; whichHalf++) {
                vector float tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
                vector float tmp10, tmp11, tmp12, tmp13;
                vector float z1, z2, z3, z4, z5;

                tmp0 = vec_add(row0, row7); // tmp0 = dataptr[0] + dataptr[7];
                tmp7 = vec_sub(row0, row7); // tmp7 = dataptr[0] - dataptr[7];
                tmp3 = vec_add(row3, row4); // tmp3 = dataptr[3] + dataptr[4];
                tmp4 = vec_sub(row3, row4); // tmp4 = dataptr[3] - dataptr[4];
                tmp1 = vec_add(row1, row6); // tmp1 = dataptr[1] + dataptr[6];
                tmp6 = vec_sub(row1, row6); // tmp6 = dataptr[1] - dataptr[6];
                tmp2 = vec_add(row2, row5); // tmp2 = dataptr[2] + dataptr[5];
                tmp5 = vec_sub(row2, row5); // tmp5 = dataptr[2] - dataptr[5];

                tmp10 = vec_add(tmp0, tmp3); // tmp10 = tmp0 + tmp3;
                tmp13 = vec_sub(tmp0, tmp3); // tmp13 = tmp0 - tmp3;
                tmp11 = vec_add(tmp1, tmp2); // tmp11 = tmp1 + tmp2;
                tmp12 = vec_sub(tmp1, tmp2); // tmp12 = tmp1 - tmp2;


                // dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS);
                row0 = vec_add(tmp10, tmp11);

                // dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);
                row4 = vec_sub(tmp10, tmp11);


                // z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
                z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero);

                // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
                //                                CONST_BITS-PASS1_BITS);
                row2 = vec_madd(tmp13, vec_0_765366865, z1);

                // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
                //                                CONST_BITS-PASS1_BITS);
                row6 = vec_madd(tmp12, vec_1_847759065, z1);

                z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7;
                z2 = vec_add(tmp5, tmp6); // z2 = tmp5 + tmp6;
                z3 = vec_add(tmp4, tmp6); // z3 = tmp4 + tmp6;
                z4 = vec_add(tmp5, tmp7); // z4 = tmp5 + tmp7;

                // z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
                z5 = vec_madd(vec_add(z3, z4), vec_1_175875602, (vector float)zero);

                // z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
                z3 = vec_madd(z3, vec_1_961570560, z5);

                // z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
                z4 = vec_madd(z4, vec_0_390180644, z5);

                // The following adds are rolled into the multiplies above
                // z3 = vec_add(z3, z5);  // z3 += z5;
                // z4 = vec_add(z4, z5);  // z4 += z5;

                // z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
                // Wow!  It's actually more efficient to roll this multiply
                // into the adds below, even though the multiply gets done twice!
                // z2 = vec_madd(z2, vec_2_562915447, (vector float)zero);

                // z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
                // Same with this one...
                // z1 = vec_madd(z1, vec_0_899976223, (vector float)zero);

                // tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
                // dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
                row7 = vec_madd(tmp4, vec_0_298631336, vec_madd(z1, vec_0_899976223, z3));

                // tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
                // dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);
                row5 = vec_madd(tmp5, vec_2_053119869, vec_madd(z2, vec_2_562915447, z4));

                // tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
                // dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
                row3 = vec_madd(tmp6, vec_3_072711026, vec_madd(z2, vec_2_562915447, z3));

                // tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
                // dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS);
                row1 = vec_madd(z1, vec_0_899976223, vec_madd(tmp7, vec_1_501321110, z4));

                // Swap the row values with the alts.  If this is the first half,
                // this sets up the low values to be acted on in the second half.
                // If this is the second half, it puts the high values back in
                // the row values where they are expected to be when we're done.
                SWAP(row0, alt0);
                SWAP(row1, alt1);
                SWAP(row2, alt2);
                SWAP(row3, alt3);
                SWAP(row4, alt4);
                SWAP(row5, alt5);
                SWAP(row6, alt6);
                SWAP(row7, alt7);
            }

            if (whichPass == 1) {
                // transpose the data for the second pass

                // First, block transpose the upper right with lower left.
                SWAP(row4, alt0);
                SWAP(row5, alt1);
                SWAP(row6, alt2);
                SWAP(row7, alt3);

                // Now, transpose each block of four
                TRANSPOSE4(row0, row1, row2, row3);
                TRANSPOSE4(row4, row5, row6, row7);
                TRANSPOSE4(alt0, alt1, alt2, alt3);
                TRANSPOSE4(alt4, alt5, alt6, alt7);
            }
        }
    }

    // perform the quantize step, using the floating point data
    // still in the row/alt registers
    {
        const int* biasAddr;
        const vector signed int* qmat;
        vector float bias, negBias;

        if (s->mb_intra) {
            vector signed int baseVector;

            // We must cache element 0 in the intra case
            // (it needs special handling).
            baseVector = vec_cts(vec_splat(row0, 0), 0);
            vec_ste(baseVector, 0, &oldBaseValue);

            qmat = (vector signed int*)s->q_intra_matrix[qscale];
            biasAddr = &(s->intra_quant_bias);
        } else {
            qmat = (vector signed int*)s->q_inter_matrix[qscale];
            biasAddr = &(s->inter_quant_bias);
        }

        // Load the bias vector (We add 0.5 to the bias so that we're
        // rounding when we convert to int, instead of flooring.)
        {
            vector signed int biasInt;
            const vector float negOneFloat = (vector float)FOUROF(-1.0f);
            LOAD4(biasInt, biasAddr);
            bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT);
            negBias = vec_madd(bias, negOneFloat, zero);
        }

        {
            vector float q0, q1, q2, q3, q4, q5, q6, q7;

            q0 = vec_ctf(qmat[0], QMAT_SHIFT);
            q1 = vec_ctf(qmat[2], QMAT_SHIFT);
            q2 = vec_ctf(qmat[4], QMAT_SHIFT);
            q3 = vec_ctf(qmat[6], QMAT_SHIFT);
            q4 = vec_ctf(qmat[8], QMAT_SHIFT);
            q5 = vec_ctf(qmat[10], QMAT_SHIFT);
            q6 = vec_ctf(qmat[12], QMAT_SHIFT);
            q7 = vec_ctf(qmat[14], QMAT_SHIFT);

            row0 = vec_sel(vec_madd(row0, q0, negBias), vec_madd(row0, q0, bias),
                    vec_cmpgt(row0, zero));
            row1 = vec_sel(vec_madd(row1, q1, negBias), vec_madd(row1, q1, bias),
                    vec_cmpgt(row1, zero));
            row2 = vec_sel(vec_madd(row2, q2, negBias), vec_madd(row2, q2, bias),
                    vec_cmpgt(row2, zero));
            row3 = vec_sel(vec_madd(row3, q3, negBias), vec_madd(row3, q3, bias),
                    vec_cmpgt(row3, zero));
            row4 = vec_sel(vec_madd(row4, q4, negBias), vec_madd(row4, q4, bias),
                    vec_cmpgt(row4, zero));
            row5 = vec_sel(vec_madd(row5, q5, negBias), vec_madd(row5, q5, bias),
                    vec_cmpgt(row5, zero));
            row6 = vec_sel(vec_madd(row6, q6, negBias), vec_madd(row6, q6, bias),
                    vec_cmpgt(row6, zero));
            row7 = vec_sel(vec_madd(row7, q7, negBias), vec_madd(row7, q7, bias),
                    vec_cmpgt(row7, zero));

            q0 = vec_ctf(qmat[1], QMAT_SHIFT);
            q1 = vec_ctf(qmat[3], QMAT_SHIFT);
            q2 = vec_ctf(qmat[5], QMAT_SHIFT);
            q3 = vec_ctf(qmat[7], QMAT_SHIFT);
            q4 = vec_ctf(qmat[9], QMAT_SHIFT);
            q5 = vec_ctf(qmat[11], QMAT_SHIFT);
            q6 = vec_ctf(qmat[13], QMAT_SHIFT);
            q7 = vec_ctf(qmat[15], QMAT_SHIFT);

            alt0 = vec_sel(vec_madd(alt0, q0, negBias), vec_madd(alt0, q0, bias),
                    vec_cmpgt(alt0, zero));
            alt1 = vec_sel(vec_madd(alt1, q1, negBias), vec_madd(alt1, q1, bias),
                    vec_cmpgt(alt1, zero));
            alt2 = vec_sel(vec_madd(alt2, q2, negBias), vec_madd(alt2, q2, bias),
                    vec_cmpgt(alt2, zero));
            alt3 = vec_sel(vec_madd(alt3, q3, negBias), vec_madd(alt3, q3, bias),
                    vec_cmpgt(alt3, zero));
            alt4 = vec_sel(vec_madd(alt4, q4, negBias), vec_madd(alt4, q4, bias),
                    vec_cmpgt(alt4, zero));
            alt5 = vec_sel(vec_madd(alt5, q5, negBias), vec_madd(alt5, q5, bias),
                    vec_cmpgt(alt5, zero));
            alt6 = vec_sel(vec_madd(alt6, q6, negBias), vec_madd(alt6, q6, bias),
                    vec_cmpgt(alt6, zero));
            alt7 = vec_sel(vec_madd(alt7, q7, negBias), vec_madd(alt7, q7, bias),
                    vec_cmpgt(alt7, zero));
        }


    }

    // Store the data back into the original block
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_pack(vec_cts(row0, 0), vec_cts(alt0, 0));
        data1 = vec_pack(vec_cts(row1, 0), vec_cts(alt1, 0));
        data2 = vec_pack(vec_cts(row2, 0), vec_cts(alt2, 0));
        data3 = vec_pack(vec_cts(row3, 0), vec_cts(alt3, 0));
        data4 = vec_pack(vec_cts(row4, 0), vec_cts(alt4, 0));
        data5 = vec_pack(vec_cts(row5, 0), vec_cts(alt5, 0));
        data6 = vec_pack(vec_cts(row6, 0), vec_cts(alt6, 0));
        data7 = vec_pack(vec_cts(row7, 0), vec_cts(alt7, 0));

        {
            // Clamp for overflow
            vector signed int max_q_int, min_q_int;
            vector signed short max_q, min_q;

            LOAD4(max_q_int, &(s->max_qcoeff));
            LOAD4(min_q_int, &(s->min_qcoeff));

            max_q = vec_pack(max_q_int, max_q_int);
            min_q = vec_pack(min_q_int, min_q_int);

            data0 = vec_max(vec_min(data0, max_q), min_q);
            data1 = vec_max(vec_min(data1, max_q), min_q);
            data2 = vec_max(vec_min(data2, max_q), min_q);
            data3 = vec_max(vec_min(data3, max_q), min_q);
            data4 = vec_max(vec_min(data4, max_q), min_q);
            data5 = vec_max(vec_min(data5, max_q), min_q);
            data6 = vec_max(vec_min(data6, max_q), min_q);
            data7 = vec_max(vec_min(data7, max_q), min_q);
        }

        {
        vector bool char zero_01, zero_23, zero_45, zero_67;
        vector signed char scanIndexes_01, scanIndexes_23, scanIndexes_45, scanIndexes_67;
        vector signed char negOne = vec_splat_s8(-1);
        vector signed char* scanPtr =
                (vector signed char*)(s->intra_scantable.inverse);
        signed char lastNonZeroChar;

        // Determine the largest non-zero index.
        zero_01 = vec_pack(vec_cmpeq(data0, (vector signed short)zero),
                vec_cmpeq(data1, (vector signed short)zero));
        zero_23 = vec_pack(vec_cmpeq(data2, (vector signed short)zero),
                vec_cmpeq(data3, (vector signed short)zero));
        zero_45 = vec_pack(vec_cmpeq(data4, (vector signed short)zero),
                vec_cmpeq(data5, (vector signed short)zero));
        zero_67 = vec_pack(vec_cmpeq(data6, (vector signed short)zero),
                vec_cmpeq(data7, (vector signed short)zero));

        // 64 biggest values
        scanIndexes_01 = vec_sel(scanPtr[0], negOne, zero_01);
        scanIndexes_23 = vec_sel(scanPtr[1], negOne, zero_23);
        scanIndexes_45 = vec_sel(scanPtr[2], negOne, zero_45);
        scanIndexes_67 = vec_sel(scanPtr[3], negOne, zero_67);

        // 32 largest values
        scanIndexes_01 = vec_max(scanIndexes_01, scanIndexes_23);
        scanIndexes_45 = vec_max(scanIndexes_45, scanIndexes_67);

        // 16 largest values
        scanIndexes_01 = vec_max(scanIndexes_01, scanIndexes_45);

        // 8 largest values
        scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne),
                vec_mergel(scanIndexes_01, negOne));

        // 4 largest values
        scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne),
                vec_mergel(scanIndexes_01, negOne));

        // 2 largest values
        scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne),
                vec_mergel(scanIndexes_01, negOne));

        // largest value
        scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne),
                vec_mergel(scanIndexes_01, negOne));

        scanIndexes_01 = vec_splat(scanIndexes_01, 0);


        vec_ste(scanIndexes_01, 0, &lastNonZeroChar);

        lastNonZero = lastNonZeroChar;

        // While the data is still in vectors we check for the transpose IDCT permute
        // and handle it using the vector unit if we can.  This is the permute used
        // by the altivec idct, so it is common when using the altivec dct.

        if ((lastNonZero > 0) && (s->dsp.idct_permutation_type == FF_TRANSPOSE_IDCT_PERM)) {
            TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);
        }

        vec_st(data0, 0, data);
        vec_st(data1, 16, data);
        vec_st(data2, 32, data);
        vec_st(data3, 48, data);
        vec_st(data4, 64, data);
        vec_st(data5, 80, data);
        vec_st(data6, 96, data);
        vec_st(data7, 112, data);
        }
    }

    // special handling of block[0]
    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                oldBaseValue /= s->y_dc_scale;
            else
                oldBaseValue /= s->c_dc_scale;
        }

        // Divide by 8, rounding the result
        data[0] = (oldBaseValue + 4) >> 3;
    }

    // We handled the transpose permutation above and we don't
    // need to permute the "no" permutation case.
    if ((lastNonZero > 0) &&
        (s->dsp.idct_permutation_type != FF_TRANSPOSE_IDCT_PERM) &&
        (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)) {
        ff_block_permute(data, s->dsp.idct_permutation,
                s->intra_scantable.scantable, lastNonZero);
    }

    return lastNonZero;
}
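The quantize step above selects +bias or -bias by the coefficient's sign so that the truncating vec_cts() conversion effectively rounds away from zero. A per-coefficient sketch (quantize_coeff_sketch, qmat_f and bias_f are illustrative names for float-converted versions of the tables, not part of the original):

/* Per-coefficient sketch of the sign-dependent rounding used above. */
static inline int quantize_coeff_sketch(float coeff, float qmat_f, float bias_f)
{
    float scaled = coeff * qmat_f;
    float biased = (coeff > 0.0f) ? scaled + bias_f : scaled - bias_f;
    return (int)biased;  /* truncation toward zero, as with vec_cts(..., 0) */
}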
Example #28
0
/*
  altivec-enhanced gmc1. ATM this code assumes stride is a multiple of 8,
  to preserve proper dst alignment.
*/
void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
{
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] =
        {
            (16-x16)*(16-y16), /* A */
            (   x16)*(16-y16), /* B */
            (16-x16)*(   y16), /* C */
            (   x16)*(   y16), /* D */
            0, 0, 0, 0         /* padding */
        };
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0);

    // we'll be able to pick up our 9 char elements
    // at src from those 32 bytes.
    // We load the first batch here, as inside the loop
    // we can reuse 'src + stride' from one iteration
    // as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src+1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for(i=0; i<h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // we'll be able to pick up our 9 char elements
        // at src + stride from those 32 bytes.
        // Then reuse the resulting 2 vectors srcvC and srcvD
        // as the next srcvA and srcvB.
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src+1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);


        // OK, now we (finally) do the math :-)
        // These four instructions replace 32 int muls & 32 int adds.
        // Isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
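For reference, here is a scalar sketch of the per-pixel computation the loop vectorises, for the 8-pixel-wide case this function handles (no clamping beyond what the weights, which sum to 256, already guarantee):

#include <stdint.h>

/* dst[j] = (A*s[j] + B*s[j+1] + C*s[j+stride] + D*s[j+stride+1] + rounder) >> 8,
 * with A..D the bilinear weights built from x16/y16 as in ABCD[] above. */
static void gmc1_ref(uint8_t *dst, const uint8_t *src, int stride, int h,
                     int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B =       x16  * (16 - y16);
    const int C = (16 - x16) *       y16;
    const int D =       x16  *       y16;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      rounder) >> 8;
        dst += stride;
        src += stride;
    }
}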
Example #29
0
static int yv12toyuy2_unscaled_altivec(SwsContext *c, const uint8_t *src[],
                                       int srcStride[], int srcSliceY,
                                       int srcSliceH, uint8_t *dstParam[],
                                       int dstStride_a[])
{
    uint8_t *dst = dstParam[0] + dstStride_a[0] * srcSliceY;
    // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH,
    //            srcStride[0], srcStride[1], dstStride[0]);
    const uint8_t *ysrc   = src[0];
    const uint8_t *usrc   = src[1];
    const uint8_t *vsrc   = src[2];
    const int width       = c->srcW;
    const int height      = srcSliceH;
    const int lumStride   = srcStride[0];
    const int chromStride = srcStride[1];
    const int dstStride   = dstStride_a[0];
    const vector unsigned char yperm = vec_lvsl(0, ysrc);
    const int vertLumPerChroma       = 2;
    register unsigned int y;

    /* This code assumes:
     *
     * 1) dst is 16 bytes-aligned
     * 2) dstStride is a multiple of 16
     * 3) width is a multiple of 16
     * 4) lum & chrom stride are multiples of 8
     */

    for (y = 0; y < height; y++) {
        int i;
        for (i = 0; i < width - 31; i += 32) {
            const unsigned int j          = i >> 1;
            vector unsigned char v_yA     = vec_ld(i, ysrc);
            vector unsigned char v_yB     = vec_ld(i + 16, ysrc);
            vector unsigned char v_yC     = vec_ld(i + 32, ysrc);
            vector unsigned char v_y1     = vec_perm(v_yA, v_yB, yperm);
            vector unsigned char v_y2     = vec_perm(v_yB, v_yC, yperm);
            vector unsigned char v_uA     = vec_ld(j, usrc);
            vector unsigned char v_uB     = vec_ld(j + 16, usrc);
            vector unsigned char v_u      = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
            vector unsigned char v_vA     = vec_ld(j, vsrc);
            vector unsigned char v_vB     = vec_ld(j + 16, vsrc);
            vector unsigned char v_v      = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
            vector unsigned char v_uv_a   = vec_mergeh(v_u, v_v);
            vector unsigned char v_uv_b   = vec_mergel(v_u, v_v);
            vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
            vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
            vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
            vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
            vec_st(v_yuy2_0, (i << 1), dst);
            vec_st(v_yuy2_1, (i << 1) + 16, dst);
            vec_st(v_yuy2_2, (i << 1) + 32, dst);
            vec_st(v_yuy2_3, (i << 1) + 48, dst);
        }
        if (i < width) {
            const unsigned int j          = i >> 1;
            vector unsigned char v_y1     = vec_ld(i, ysrc);
            vector unsigned char v_u      = vec_ld(j, usrc);
            vector unsigned char v_v      = vec_ld(j, vsrc);
            vector unsigned char v_uv_a   = vec_mergeh(v_u, v_v);
            vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
            vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
            vec_st(v_yuy2_0, (i << 1), dst);
            vec_st(v_yuy2_1, (i << 1) + 16, dst);
        }
        if ((y & (vertLumPerChroma - 1)) == vertLumPerChroma - 1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst  += dstStride;
    }

    return srcSliceH;
}
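Per output line, the stores above amount to the Y U Y V interleave sketched below; the vector version additionally reuses each chroma line for vertLumPerChroma (= 2) luma lines.

#include <stdint.h>

/* Scalar sketch: pack one YV12 line into YUY2, one U/V pair per two pixels. */
static void yv12_to_yuy2_row_ref(const uint8_t *ysrc, const uint8_t *usrc,
                                 const uint8_t *vsrc, uint8_t *dst, int width)
{
    for (int x = 0; x < width; x += 2) {
        *dst++ = ysrc[x];
        *dst++ = usrc[x >> 1];
        *dst++ = ysrc[x + 1];
        *dst++ = vsrc[x >> 1];
    }
}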
Example #30
0
void YUV422_to_BGRA_altivec(const unsigned char *yuvdata,
			    size_t pixelnum, unsigned char *output)
{
  const vector unsigned char *UYVY_ptr=reinterpret_cast<const vector unsigned char *>(yuvdata);
  vector unsigned char *BGRA_ptr=reinterpret_cast<vector unsigned char *>(output);

  vector unsigned int vShift;
  vector signed short tempU, tempV, tempY, tempUV, out1, out2, out3, out4;

  vector signed short  v16, v128, a255, szero, one;
  vector unsigned char zero;
  vector signed short t0, t1, t2, tempGB1, tempGB2, tempRA1, tempRA2;
  vector signed short vU_G, vV_G, vU_B, vU_R, y0, hiImage, loImage;
  vector unsigned int   uv_rEven, uv_rOdd, uv_rHi, uv_rLo,
					  uv_gUEven, uv_gVEven, uv_gUOdd, uv_gVOdd, uv_gHi, uv_gLo,
					  uv_bEven, uv_bOdd;
  vector signed int	tempUhi, tempUlo, tempVhi, tempVlo;
  vector signed int yEven, yOdd;

  vector unsigned int t0Even, t0Odd, t1Even, t1Odd, t2Even, t2Odd;

  /* Load the equation constants. */
  vector signed short vConst =
    { 298, 519, 409, 16, 128, 255, -100, -210 };

  vector unsigned char vPerm1 =
    {  0,  1, 16, 17,  8,  9, 24, 25,
       2,  3, 18, 19, 10, 11, 26, 27 };
  vector unsigned char vPerm2 =
    {  4,  5, 20, 21, 12, 13, 28, 29,
       6,  7, 22, 23, 14, 15, 30, 31 };

  vector unsigned char vPermY =
    {  2,  3,  6,  7, 10, 11, 14, 15,
      18, 19, 22, 23, 26, 27, 30, 31 };
  vector unsigned char vPermU =
    {  0,  1, 16, 17,  4,  5, 20, 21,
       8,  9, 24, 25, 12, 13, 28, 29 };
  vector unsigned char vPermV =
    {  2,  3, 18, 19,  6,  7, 22, 23,
      10, 11, 26, 27, 14, 15, 30, 31 };
  vector unsigned char vOutPerm1 =
    {  0,  1,  2,  3, 16, 17, 18, 19,
       4,  5,  6,  7, 20, 21, 22, 23 };
  vector unsigned char vOutPerm2 =
    {  8,  9, 10, 11, 24, 25, 26, 27,
      12, 13, 14, 15, 28, 29, 30, 31 };
  vector unsigned char uvPerm =
    {  0,  1,  4,  5,  8,  9, 12, 13,
      16, 17, 20, 21, 24, 25, 28, 29 };

  zero   = vec_splat_u8(0);
  szero  = vec_splat_s16(0);
  one    = vec_splat_s16(1);
  vShift = vec_splat_u32(8);
  a255   = vec_splat( vConst, 5 ); // alpha channel = 255
  vU_G   = vec_splat( vConst, 6 ); // -100
  vV_G   = vec_splat( vConst, 7 ); // -210
  vU_B   = vec_splat( vConst, 1 ); // 519
  vU_R   = vec_splat( vConst, 2 ); // 409
  y0     = vec_splat( vConst, 0 ); // 298
  v16    = vec_splat( vConst, 3 ); //  16
  v128   = vec_splat( vConst, 4 ); // 128

  for ( unsigned int i = 0; i < (pixelnum/sizeof(vector unsigned char)); i++ ) {

    // Load UYUV input vector
	const vector unsigned char *vec1 = UYVY_ptr++;

	//expand the UInt8's to short's
	hiImage = static_cast<vector signed short>(vec_mergeh( zero, *vec1 ));
	loImage = static_cast<vector signed short>(vec_mergel( zero, *vec1 ));

	tempUV = static_cast<vector signed short>(vec_perm( hiImage, loImage, uvPerm ));
	tempY  = static_cast<vector signed short>(vec_perm( hiImage, loImage, vPermY ));

	// subtract UV_OFFSET from UV's  (should this be saturated?)
	tempUV = static_cast<vector signed short>(vec_sub( tempUV, v128 ));
	// subtract Y-OFFSET from Y's    (should this be saturated?)
	tempY  = static_cast<vector signed short>(vec_sub( tempY, v16 ));

	// expand to UUUU UUUU and VVVV VVVV
	tempU = vec_perm(tempUV, tempUV, vPermU);
	tempV = vec_perm(tempUV, tempUV, vPermV);
	//below:
	//
	//error: cannot convert `vector int' to `vector unsigned int' in assignment
	tempUhi = vec_mule( tempU, one );
	// unsigned int = vec_mule( signed short, signed short )
	// should be
	// signed int = vec_mule( signed short, signed short )
	tempUlo = vec_mulo( tempU, one );
	tempVhi = vec_mule( tempV, one );
	tempVlo = vec_mulo( tempV, one );

	// uv_r = YUV2RGB_12*u + YUV2RGB_13*v
	// uv_r = (-1)*u + 409*v (or "409*V - U")
	uv_rEven = vec_mule( tempV, vU_R );
	uv_rOdd  = vec_mulo( tempV, vU_R );
	uv_rHi = vec_sub( uv_rEven, tempUhi );
	uv_rLo = vec_sub( uv_rOdd, tempUlo );

	// uv_g = YUV2RGB_22*u + YUV2RGB_23*v
	// uv_g = -100*u + (-210)*v
	// multiply U by -100
	uv_gUEven = vec_mule( tempU, vU_G );
	uv_gUOdd  = vec_mulo( tempU, vU_G );
	// multiply V by -210
	uv_gVEven = vec_mule( tempV, vV_G );
	uv_gVOdd  = vec_mulo( tempV, vV_G );
	// add U & V products
	uv_gHi   = vec_add( uv_gUEven, uv_gVEven );
	uv_gLo   = vec_add( uv_gUOdd, uv_gVOdd );

	// uv_b = YUV2RGB_32*u + YUV2RGB_33*v
	// uv_b = 519*u + 0*v
	uv_bEven = vec_mule( tempU, vU_B );
	uv_bOdd  = vec_mulo( tempU, vU_B );

	// y = YUV2RGB_11 * tempY
	// y = 298* (tempY - 16)
	yEven = vec_mule( tempY, y0 );
	yOdd  = vec_mulo( tempY, y0 );

	// add while int's
	t0Even = vec_add( yEven, uv_bEven );
	t0Odd  = vec_add( yOdd, uv_bOdd );
	t1Even = vec_add( yEven, uv_gHi );
	t1Odd  = vec_add( yOdd, uv_gLo );
	t2Even = vec_add( yEven, uv_rHi );
	t2Odd  = vec_add( yOdd, uv_rLo );

	// shift while int's
	t0Even = vec_sra( t0Even, vShift );
	t0Odd  = vec_sra( t0Odd,  vShift );
	t1Even = vec_sra( t1Even, vShift );
	t1Odd  = vec_sra( t1Odd,  vShift );
	t2Even = vec_sra( t2Even, vShift );
	t2Odd  = vec_sra( t2Odd,  vShift );

	// pack down to shorts
	t0 = vec_packs( t0Even, t0Odd );
	t1 = vec_packs( t1Even, t1Odd );
	t2 = vec_packs( t2Even, t2Odd );

	// Permute to GBGBGBGB GBGBGBGB + re-interleave even & odd
	tempGB1 = vec_perm( t1,   t0, vPerm1 );
	tempGB2 = vec_perm( t1,   t0, vPerm2 );
	// Permute to ARARARAR ARARARAR + re-interleave even & odd
	tempRA1 = vec_perm( a255, t2, vPerm1 );
	tempRA2 = vec_perm( a255, t2, vPerm2 );

	// Permute to ARGB's
	out1 = vec_perm( tempRA1, tempGB1, vOutPerm1 );
	out2 = vec_perm( tempRA1, tempGB1, vOutPerm2 );
	out3 = vec_perm( tempRA2, tempGB2, vOutPerm1 );
	out4 = vec_perm( tempRA2, tempGB2, vOutPerm2 );

	// pack down to char's
	*BGRA_ptr = vec_packsu( out1, out2 );
	BGRA_ptr++;
	*BGRA_ptr = vec_packsu( out3, out4 );
	BGRA_ptr++;
  }
}
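A scalar sketch of the fixed-point conversion, using the same 8.8 constants splatted from vConst; the alpha channel is the splatted 255, and the final clamp is what vec_packsu supplies in the vector code. Note the red term mirrors the code's "409*V - U" comment rather than the textbook 409*V term.

#include <stdint.h>

static uint8_t clamp_u8(int v) { return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); }

/* One pixel: y, u, v are raw 0..255 samples from the UYVY stream. */
static void uyvy_pixel_to_rgb_ref(int y, int u, int v,
                                  uint8_t *r, uint8_t *g, uint8_t *b)
{
    int c = 298 * (y - 16);
    int d = u - 128;
    int e = v - 128;
    *r = clamp_u8((c + 409 * e - d)       >> 8);  /* "409*V - U" as above */
    *g = clamp_u8((c - 100 * d - 210 * e) >> 8);
    *b = clamp_u8((c + 519 * d)           >> 8);
}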