Code Example #1
 template<bool align> SIMD_INLINE v128_u8 BlurRow(const Buffer & buffer, size_t offset)
 {
     v128_u16 lo = DivideBy16(BinomialSum(Load<align>(buffer.src0 + offset), Load<align>(buffer.src1 + offset), Load<align>(buffer.src2 + offset)));
     offset += HA;
     v128_u16 hi = DivideBy16(BinomialSum(Load<align>(buffer.src0 + offset), Load<align>(buffer.src1 + offset), Load<align>(buffer.src2 + offset)));
     return vec_pack(lo, hi);
 }
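Note: vec_pack narrows each 16-bit lane to 8 bits (modulo truncation) and concatenates the eight narrowed elements of the first operand with those of the second. In BlurRow the sums have already been divided by 16, so every lane fits in 0..255 and the truncation is lossless. A minimal, self-contained sketch of that lo/hi narrowing step (the helper name pack_lo_hi is illustrative, not part of the Simd library):

#include <altivec.h>
#include <stdio.h>

/* Narrow two vectors of 16-bit values (assumed already in 0..255)
 * into one vector of 16 bytes, as the return statement of BlurRow does. */
static vector unsigned char pack_lo_hi(vector unsigned short lo,
                                       vector unsigned short hi)
{
    /* result = { lo[0..7], hi[0..7] }, each lane truncated modulo 256 */
    return vec_pack(lo, hi);
}

int main(void)
{
    vector unsigned short lo = { 0, 1, 2, 3, 4, 5, 6, 7 };
    vector unsigned short hi = { 250, 251, 252, 253, 254, 255, 256, 257 };
    unsigned char buf[16] __attribute__((aligned(16)));
    vec_st(pack_lo_hi(lo, hi), 0, buf);
    for (int i = 0; i < 16; i++)
        printf("%d ", buf[i]);   /* 256 and 257 wrap to 0 and 1 */
    printf("\n");
    return 0;
}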
Code Example #2
void
transfer8x8_copy_altivec_c( uint8_t * dst,
                            uint8_t * src,
                            uint32_t stride)
{
    register vector unsigned char tmp;
    register vector unsigned char mask;
    register vector unsigned char t0, t1;
    
#ifdef DEBUG
    if(((unsigned long)dst) & 0x7)
        fprintf(stderr, "transfer8x8_copy_altivec:incorrect align, dst: %lx\n", (long)dst);
    if(stride & 0x7)
        fprintf(stderr, "transfer8x8_copy_altivec:incorrect stride, stride: %u\n", stride);
#endif
    mask = vec_pack(vec_splat_u16(0), vec_splat_u16(-1));
    
    COPY8TO8();
    COPY8TO8();
    COPY8TO8();
    COPY8TO8();
    
    COPY8TO8();
    COPY8TO8();
    COPY8TO8();
    COPY8TO8();
}
Code Example #3
void
transfer_8to16sub_altivec_c(int16_t * dct,
							uint8_t * cur,
							uint8_t * ref,
							const uint32_t stride)
{
	register vector unsigned char c,r;
	register vector unsigned char ox00;
	register vector unsigned char mask_00ff;
	register vector unsigned char mask;
	register vector signed short cs,rs;
	
#ifdef DEBUG
	if((long)dct & 0xf)
		fprintf(stderr, "transfer_8to16sub_altivec_c:incorrect align, dct: %lx\n", (long)dct);
	if((long)cur & 0x7)
		fprintf(stderr, "transfer_8to16sub_altivec_c:incorrect align, cur: %lx\n", (long)cur);
	if(stride & 0x7)
		fprintf(stderr, "transfer_8to16sub_altivec_c:incorrect stride, stride: %lu\n", (long)stride);
#endif
	/* initialize */
	ox00 = vec_splat_u8(0);
	mask_00ff = vec_pack((vector unsigned short)ox00,vec_splat_u16(-1));
	
	SUB8TO16();
	SUB8TO16();
	SUB8TO16();
	SUB8TO16();
	
	SUB8TO16();
	SUB8TO16();
	SUB8TO16();
	SUB8TO16();
}
Code Example #4
File: sad_altivec.c  Project: Distrotech/xvidcore
uint32_t
sad8_altivec_c(const uint8_t * cur,
	   const uint8_t *ref,
	   const uint32_t stride)
{
	uint32_t result = 0;
	
	register vector unsigned int sad;
	register vector unsigned char c;
	register vector unsigned char r;
	
	/* initialize */
	sad = vec_splat_u32(0);
	
	/* Perform sad operations */
	SAD8();
	SAD8();
	SAD8();
	SAD8();
	
	SAD8();
	SAD8();
	SAD8();
	SAD8();
	
	/* finish addition, add the first 2 together */
	sad = vec_and(sad, (vector unsigned int)vec_pack(vec_splat_u16(-1),vec_splat_u16(0)));
	sad = (vector unsigned int)vec_sums((vector signed int)sad, vec_splat_s32(0));
	sad = vec_splat(sad,3);
	vec_ste(sad, 0, &result);
		
	return result;
}
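Two idioms in sad8_altivec_c are worth calling out. The constant vec_pack(vec_splat_u16(-1), vec_splat_u16(0)) builds a 16-byte mask of eight 0xFF bytes followed by eight 0x00 bytes without a memory load; vec_and uses it to discard the lanes that accumulated data from the unused half of each 8-pixel row. vec_sums then folds the remaining 32-bit partial sums into element 3, vec_splat broadcasts that element, and vec_ste writes the single word to the scalar result. A hedged, standalone sketch of the horizontal-sum tail (the SAD8 macro belongs to xvidcore and is not reproduced; horizontal_sum_u32 is an illustrative name):

#include <altivec.h>
#include <stdint.h>
#include <stdio.h>

/* Reduce four 32-bit partial sums to one scalar, the same way the tail
 * of sad8_altivec_c does (the masking step is omitted for clarity). */
static uint32_t horizontal_sum_u32(vector unsigned int acc)
{
    uint32_t result = 0;
    /* vec_sums adds all four signed words (plus element 3 of the second
     * operand) and leaves the total in element 3 of the result. */
    vector signed int total = vec_sums((vector signed int)acc, vec_splat_s32(0));
    /* Broadcast element 3 so vec_ste stores the same value at any address. */
    total = vec_splat(total, 3);
    vec_ste((vector unsigned int)total, 0, &result);
    return result;
}

int main(void)
{
    vector unsigned int acc = { 1, 2, 3, 4 };
    printf("%u\n", (unsigned)horizontal_sum_u32(acc));   /* prints 10 */
    return 0;
}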
Code Example #5
void transfer_16to8copy_altivec_c(uint8_t *dst,
                            vector signed short *src,
                            uint32_t stride)
{
    register vector signed short s;
    register vector unsigned char packed;
    register vector unsigned char mask_stencil;
    register vector unsigned char mask;
    register vector unsigned char load_src_perm;
    
#ifdef DEBUG
    /* if this is on, print alignment errors */
    if(((unsigned long) dst) & 0x7)
        fprintf(stderr, "transfer_16to8copy_altivec:incorrect align, dst %lx\n", (long)dst);
    if(stride & 0x7)
        fprintf(stderr, "transfer_16to8copy_altivec:incorrect align, stride %u\n", stride);
#endif
    /* Initialisation stuff */
    load_src_perm = vec_lvsl(0, (unsigned char*)src);
    mask_stencil = vec_pack(vec_splat_u16(0), vec_splat_u16(-1));
    
    COPY16TO8();
    COPY16TO8();
    COPY16TO8();
    COPY16TO8();
    
    COPY16TO8();
    COPY16TO8();
    COPY16TO8();
    COPY16TO8();
}
Code Example #6
        template <bool align, bool first> SIMD_INLINE void BgrToYuv422p(const uint8_t * bgr, Storer<align> & y, Storer<align> & u, Storer<align> & v)
        {
            v128_u8 blue[2], green[2], red[2];
            LoadBgr<align>(bgr, blue[0], green[0], red[0]);
            Store<align, first>(y, BgrToY(blue[0], green[0], red[0]));
            LoadBgr<align>(bgr + 3 * A, blue[1], green[1], red[1]);
            Store<align, false>(y, BgrToY(blue[1], green[1], red[1]));

            v128_s16 blueAvg[2], greenAvg[2], redAvg[2];
            blueAvg[0] = Average(blue[0]);
            blueAvg[1] = Average(blue[1]);
            greenAvg[0] = Average(green[0]);
            greenAvg[1] = Average(green[1]);
            redAvg[0] = Average(red[0]);
            redAvg[1] = Average(red[1]);
            Store<align, first>(u, vec_pack(BgrToU(blueAvg[0], greenAvg[0], redAvg[0]), BgrToU(blueAvg[1], greenAvg[1], redAvg[1])));
            Store<align, first>(v, vec_pack(BgrToV(blueAvg[0], greenAvg[0], redAvg[0]), BgrToV(blueAvg[1], greenAvg[1], redAvg[1])));
        }
Code Example #7
 SIMD_INLINE v128_u8 Average8(const v128_u8 & s00, const v128_u8 & s01, const v128_u8 & s10, const v128_u8 & s11)
 {
     v128_u16 lo = Average16(
         vec_mule(s00, K8_01), vec_mulo(s00, K8_01), 
         vec_mule(s10, K8_01), vec_mulo(s10, K8_01)); 
     v128_u16 hi = Average16(
         vec_mule(s01, K8_01), vec_mulo(s01, K8_01), 
         vec_mule(s11, K8_01), vec_mulo(s11, K8_01)); 
     return vec_pack(lo, hi);
 }
Code Example #8
void
jsimd_h2v1_downsample_altivec (JDIMENSION image_width, int max_v_samp_factor,
                               JDIMENSION v_samp_factor,
                               JDIMENSION width_blocks,
                               JSAMPARRAY input_data, JSAMPARRAY output_data)
{
  int outrow, outcol;
  JDIMENSION output_cols = width_blocks * DCTSIZE;
  JSAMPROW inptr, outptr;

  __vector unsigned char this0, next0, out;
  __vector unsigned short this0e, this0o, next0e, next0o, outl, outh;

  /* Constants */
  __vector unsigned short pw_bias = { __4X2(0, 1) },
    pw_one = { __8X(1) };
  __vector unsigned char even_odd_index =
    {0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15},
    pb_zero = { __16X(0) };

  expand_right_edge(input_data, max_v_samp_factor, image_width,
                    output_cols * 2);

  for (outrow = 0; outrow < v_samp_factor; outrow++) {
    outptr = output_data[outrow];
    inptr = input_data[outrow];

    for (outcol = output_cols; outcol > 0;
         outcol -= 16, inptr += 32, outptr += 16) {

      this0 = vec_ld(0, inptr);
      this0 = vec_perm(this0, this0, even_odd_index);
      this0e = (__vector unsigned short)VEC_UNPACKHU(this0);
      this0o = (__vector unsigned short)VEC_UNPACKLU(this0);
      outl = vec_add(this0e, this0o);
      outl = vec_add(outl, pw_bias);
      outl = vec_sr(outl, pw_one);

      if (outcol > 8) {
        next0 = vec_ld(16, inptr);
        next0 = vec_perm(next0, next0, even_odd_index);
        next0e = (__vector unsigned short)VEC_UNPACKHU(next0);
        next0o = (__vector unsigned short)VEC_UNPACKLU(next0);
        outh = vec_add(next0e, next0o);
        outh = vec_add(outh, pw_bias);
        outh = vec_sr(outh, pw_one);
      } else
        outh = vec_splat_u16(0);

      out = vec_pack(outl, outh);
      vec_st(out, 0, outptr);
    }
  }
}
Code Example #9
File: predict.c  Project: UIKit0/H.264-in-CUDA
static void predict_16x16_p_altivec( uint8_t *src )
{
    int16_t a, b, c, i;
    int H = 0;
    int V = 0;
    int16_t i00;

    for( i = 1; i <= 8; i++ )
    {
        H += i * ( src[7+i - FDEC_STRIDE ]  - src[7-i - FDEC_STRIDE ] );
        V += i * ( src[(7+i)*FDEC_STRIDE -1] - src[(7-i)*FDEC_STRIDE -1] );
    }

    a = 16 * ( src[15*FDEC_STRIDE -1] + src[15 - FDEC_STRIDE] );
    b = ( 5 * H + 32 ) >> 6;
    c = ( 5 * V + 32 ) >> 6;
    i00 = a - b * 7 - c * 7 + 16;

    vect_sshort_u i00_u, b_u, c_u;
    i00_u.s[0] = i00;
    b_u.s[0]   = b;
    c_u.s[0]   = c;

    vec_u16_t val5_v = vec_splat_u16(5);
    vec_s16_t i00_v, b_v, c_v;
    i00_v = vec_splat(i00_u.v, 0);
    b_v = vec_splat(b_u.v, 0);
    c_v = vec_splat(c_u.v, 0);
    vec_s16_t induc_v  = (vec_s16_t) CV(0,  1,  2,  3,  4,  5,  6,  7);
    vec_s16_t b8_v = vec_sl(b_v, vec_splat_u16(3));
    vec_s32_t mule_b_v = vec_mule(induc_v, b_v);
    vec_s32_t mulo_b_v = vec_mulo(induc_v, b_v);
    vec_s16_t mul_b_induc0_v = vec_pack(vec_mergeh(mule_b_v, mulo_b_v), vec_mergel(mule_b_v, mulo_b_v));
    vec_s16_t add_i0_b_0v = vec_adds(i00_v, mul_b_induc0_v);
    vec_s16_t add_i0_b_8v = vec_adds(b8_v, add_i0_b_0v);

    int y;

    for( y = 0; y < 16; y++ )
    {
        vec_s16_t shift_0_v = vec_sra(add_i0_b_0v, val5_v);
        vec_s16_t shift_8_v = vec_sra(add_i0_b_8v, val5_v);
        vec_u8_t com_sat_v = vec_packsu(shift_0_v, shift_8_v);
        vec_st( com_sat_v, 0, &src[0]);
        src += FDEC_STRIDE;
        i00 += c;
        add_i0_b_0v = vec_adds(add_i0_b_0v, c_v);
        add_i0_b_8v = vec_adds(add_i0_b_8v, c_v);
    }
}
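The full-width 16-bit multiply in predict_16x16_p_altivec shows vec_pack in a different role: vec_mule and vec_mulo produce 32-bit products of the even- and odd-numbered lanes, vec_mergeh/vec_mergel interleave them back into the original lane order, and vec_pack narrows the results to 16 bits again. A minimal sketch of that idiom in isolation, assuming big-endian lane numbering as in the original and products small enough that the narrowing is lossless (mul_lo16 is an illustrative name):

#include <altivec.h>
#include <stdio.h>

/* Lane-wise multiply of eight signed 16-bit values, keeping the low 16 bits
 * of each product, via the mule/mulo + mergeh/mergel + pack idiom. */
static vector signed short mul_lo16(vector signed short a, vector signed short b)
{
    vector signed int even = vec_mule(a, b);   /* products of lanes 0,2,4,6 */
    vector signed int odd  = vec_mulo(a, b);   /* products of lanes 1,3,5,7 */
    /* Interleave even/odd back into source order, then narrow 32 -> 16 bit. */
    return vec_pack(vec_mergeh(even, odd), vec_mergel(even, odd));
}

int main(void)
{
    vector signed short a = { 0, 1, 2, 3, 4, 5, 6, 7 };
    vector signed short b = { 7, 6, 5, 4, 3, 2, 1, 0 };
    short buf[8] __attribute__((aligned(16)));
    vec_st(mul_lo16(a, b), 0, buf);
    for (int i = 0; i < 8; i++)
        printf("%d ", buf[i]);   /* 0 6 10 12 12 10 6 0 */
    printf("\n");
    return 0;
}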
Code Example #10
        template <bool align, bool first> SIMD_INLINE void BgrToYuv420p(const uint8_t * bgr0, size_t bgrStride,
            Storer<align> & y0, Storer<align> & y1, Storer<align> & u, Storer<align> & v)
        {
            const uint8_t * bgr1 = bgr0 + bgrStride;
            v128_u8 blue[2][2], green[2][2], red[2][2];
            LoadBgr<align>(bgr0, blue[0][0], green[0][0], red[0][0]);
            Store<align, first>(y0, BgrToY(blue[0][0], green[0][0], red[0][0]));
            LoadBgr<align>(bgr0 + 3 * A, blue[0][1], green[0][1], red[0][1]);
            Store<align, false>(y0, BgrToY(blue[0][1], green[0][1], red[0][1]));
            LoadBgr<align>(bgr1, blue[1][0], green[1][0], red[1][0]);
            Store<align, first>(y1, BgrToY(blue[1][0], green[1][0], red[1][0]));
            LoadBgr<align>(bgr1 + 3 * A, blue[1][1], green[1][1], red[1][1]);
            Store<align, false>(y1, BgrToY(blue[1][1], green[1][1], red[1][1]));

            v128_s16 blueAvg[2], greenAvg[2], redAvg[2];
            blueAvg[0] = Average(blue[0][0], blue[1][0]);
            blueAvg[1] = Average(blue[0][1], blue[1][1]);
            greenAvg[0] = Average(green[0][0], green[1][0]);
            greenAvg[1] = Average(green[0][1], green[1][1]);
            redAvg[0] = Average(red[0][0], red[1][0]);
            redAvg[1] = Average(red[0][1], red[1][1]);
            Store<align, first>(u, vec_pack(BgrToU(blueAvg[0], greenAvg[0], redAvg[0]), BgrToU(blueAvg[1], greenAvg[1], redAvg[1])));
            Store<align, first>(v, vec_pack(BgrToV(blueAvg[0], greenAvg[0], redAvg[0]), BgrToV(blueAvg[1], greenAvg[1], redAvg[1])));
        }
Code Example #11
void
transfer_8to16sub2_altivec_c(vector signed short *dct,
                             uint8_t *cur,
                             uint8_t *ref1,
                             uint8_t *ref2,
                             const uint32_t stride)
{
    vector unsigned char r1;
    vector unsigned char r2;
    vector unsigned char r;
    vector unsigned char c;
    vector unsigned char mask;
    vector signed short cs;
    vector signed short rs;
    
#ifdef DEBUG
    /* Dump alignment errors if DEBUG is set */
    if(((unsigned long)dct) & 0xf)
        fprintf(stderr, "transfer_8to16sub2_altivec_c:incorrect align, dct: %lx\n", (long)dct);
    if(((unsigned long)cur) & 0x7)
        fprintf(stderr, "transfer_8to16sub2_altivec_c:incorrect align, cur: %lx\n", (long)cur);
    if(stride & 0x7)
        fprintf(stderr, "transfer_8to16sub2_altivec_c:incorrect align, dct: %u\n", stride);
#endif
    
    /* Initialisation */
    mask = vec_pack(vec_splat_u16(0), vec_splat_u16(-1));
    
    SUB28TO16();
    SUB28TO16();
    SUB28TO16();
    SUB28TO16();
    
    SUB28TO16();
    SUB28TO16();
    SUB28TO16();
    SUB28TO16();
}
Code Example #12
File: iquant_intra.c  Project: jlehtine/yuvmotionfps
void iquant_intra_m1_altivec(IQUANT_INTRA_PDECL)
{
    int i;
    vector signed short vsrc;
    uint16_t *qmat;
    vector unsigned short vqmat;
    vector unsigned short vmquant;
    vector bool short eqzero, ltzero;
    vector signed short val, t0;
    vector signed short zero, one;
    vector unsigned int four;
    vector signed short min, max;
    int offset, offset2;
    int16_t dst0;
    union {
	vector unsigned short vu16;
	unsigned short mquant;
	vector signed int vs32;
	struct {
	    signed int pad[3];
	    signed int sum;
	} s;
    } vu;
#ifdef ALTIVEC_DST
    DataStreamControl dsc;
#endif

#ifdef ALTIVEC_VERIFY /* {{{ */
    if (NOT_VECTOR_ALIGNED(wsp->intra_q_mat))
	mjpeg_error_exit1("iquant_intra_m1: wsp->intra_q_mat %% 16 != 0, (%d)",
	    wsp->intra_q_mat);

    if (NOT_VECTOR_ALIGNED(src))
	mjpeg_error_exit1("iquant_intra_m1: src %% 16 != 0, (%d)", src);

    if (NOT_VECTOR_ALIGNED(dst))
	mjpeg_error_exit1("iquant_intra_m1: dst %% 16 != 0, (%d)", dst);

    for (i = 0; i < 64; i++)
	if (src[i] < -256 || src[i] > 255)
	    mjpeg_error_exit1("iquant_intra_m2: -256 > src[%i] > 255, (%d)",
		i, src[i]);
#endif /* }}} */

    AMBER_START;

    dst0 = src[0] << (3 - dc_prec);

    qmat = (uint16_t*)wsp->intra_q_mat;

#ifdef ALTIVEC_DST
    dsc.control = DATA_STREAM_CONTROL(64/8,1,0);
    vec_dst(src, dsc.control, 0);
    vec_dst(qmat, dsc.control, 1);
#endif

    /* vmquant = (vector unsigned short)(mquant); */
    vu.mquant = (unsigned short)mquant;
    vmquant = vec_splat(vu.vu16, 0);

    zero = vec_splat_s16(0);
    one = vec_splat_s16(1);
    four = vec_splat_u32(4);
    /* max = (2047); min = (-2048); {{{ */
    vu8(max) = vec_splat_u8(0x7);
    t0 = vec_splat_s16(-1); /* 0xffff */
    vu8(max) = vec_mergeh(vu8(max), vu8(t0)); /* 0x07ff == 2047 */
    min = vec_sub(t0, max);
    /* }}} */
    offset = 0;

#if 1
    vsrc = vec_ld(offset, (signed short*)src);
    vqmat = vec_ld(offset, (unsigned short*)qmat);
    i = (64/8) - 1;
    do {
	/* intra_q[i] * mquant */
	vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

	/* save sign */
	ltzero = vec_cmplt(vsrc, zero);
	eqzero = vec_cmpeq(vsrc, zero);

	/* val = abs(src) */
	t0 = vec_sub(zero, vsrc);
	val = vec_max(t0, vsrc);

	/* val = (src * quant) >> 4 */
	vs32(t0) = vec_mule(val, vs16(vqmat));
	vs32(val) = vec_mulo(val, vs16(vqmat));
	vs32(t0) = vec_sra(vs32(t0), four);
	vs16(t0) = vec_pack(vs32(t0), vs32(t0));
	vs32(val) = vec_sra(vs32(val), four);
	vs16(val) = vec_pack(vs32(val), vs32(val));
	val = vec_mergeh(vs16(t0), vs16(val));

	offset2 = offset;
	offset += 8*sizeof(int16_t);
	vsrc = vec_ld(offset, (signed short*)src);
	vqmat = vec_ld(offset, (unsigned short*)qmat);

	/* val = val - 1&~(val|val==0) */
	t0 = vec_or(val, eqzero);
	t0 = vec_andc(one, t0);
	val = vec_sub(val, t0);

	/* restore sign */
	t0 = vec_sub(zero, val);
	val = vec_sel(val, t0, ltzero);

	/* val = (val > 2047) ? 2047 : ((val < -2048) ? -2048 : val); */
	val = vec_min(val, max);
	val = vec_max(val, min);

	vec_st(val, offset2, dst);
    } while (--i);
    /* intra_q[i] * mquant */
    vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

    /* save sign */
    ltzero = vec_cmplt(vsrc, zero);
    eqzero = vec_cmpeq(vsrc, zero);

    /* val = abs(src) */
    t0 = vec_sub(zero, vsrc);
    val = vec_max(t0, vsrc);

    /* val = (src * quant) >> 4 */
    vs32(t0) = vec_mule(val, vs16(vqmat));
    vs32(val) = vec_mulo(val, vs16(vqmat));
    vs32(t0) = vec_sra(vs32(t0), four);
    vs16(t0) = vec_pack(vs32(t0), vs32(t0));
    vs32(val) = vec_sra(vs32(val), four);
    vs16(val) = vec_pack(vs32(val), vs32(val));
    val = vec_mergeh(vs16(t0), vs16(val));

    /* val = val - 1&~(val|val==0) */
    t0 = vec_or(val, eqzero);
    t0 = vec_andc(one, t0);
    val = vec_sub(val, t0);

    /* restore sign */
    t0 = vec_sub(zero, val);
    val = vec_sel(val, t0, ltzero);

    /* val = (val > 2047) ? 2047 : ((val < -2048) ? -2048 : val); */
    val = vec_min(val, max);
    val = vec_max(val, min);

    vec_st(val, offset, dst);
#else
    /* {{{ */
    i = (64/8);
    do {
	vsrc = vec_ld(offset, (signed short*)src);
	vqmat = vec_ld(offset, (unsigned short*)qmat);

	/* intra_q[i] * mquant */
	vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

	/* save sign */
	ltzero = vec_cmplt(vsrc, zero);
	eqzero = vec_cmpeq(vsrc, zero);

	/* val = abs(src) */
	t0 = vec_sub(zero, vsrc);
	val = vec_max(t0, vsrc);

	/* val = (src * quant) >> 4 */
	vs32(t0) = vec_mule(val, vs16(vqmat));
	vs32(val) = vec_mulo(val, vs16(vqmat));
	vs32(t0) = vec_sra(vs32(t0), four);
	vs16(t0) = vec_pack(vs32(t0), vs32(t0));
	vs32(val) = vec_sra(vs32(val), four);
	vs16(val) = vec_pack(vs32(val), vs32(val));
	val = vec_mergeh(vs16(t0), vs16(val));

	/* val = val - 1&~(val|val==0) */
	t0 = vec_or(val, eqzero);
	t0 = vec_andc(one, t0);
	val = vec_sub(val, t0);

	/* restore sign */
	t0 = vec_sub(zero, val);
	val = vec_sel(val, t0, ltzero);

	/* val = (val > 2047) ? 2047 : ((val < -2048) ? -2048 : val); */
	val = vec_min(val, max);
	val = vec_max(val, min);

	vec_st(val, offset, dst);

	offset += 8*sizeof(int16_t);
    } while (--i);
    /* }}} */
#endif

    dst[0] = dst0;

    AMBER_STOP;
}
Code Example #13
File: builtins-ppc-vsx.c  Project: AlexDenisov/clang
void test1() {
// CHECK-LABEL: define void @test1
// CHECK-LE-LABEL: define void @test1

  res_vf = vec_abs(vf);
// CHECK: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{[0-9]*}})

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_add(vd, vd);
// CHECK: fadd <2 x double>
// CHECK-LE: fadd <2 x double>

  res_vd = vec_and(vbll, vd);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  res_vd = vec_and(vd, vbll);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  res_vd = vec_and(vd, vd);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_andc(vbll, vd);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_andc(vd, vbll);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()

  res_vd = vec_andc(vd, vd);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_ceil(vd);
// CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})

  res_vf = vec_ceil(vf);
// CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})

  res_vbll = vec_cmpeq(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmpeq(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  res_vbll = vec_cmpge(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmpge(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  res_vbll = vec_cmpgt(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmpgt(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  res_vbll = vec_cmple(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmple(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  res_vbll = vec_cmplt(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmplt(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  /* vec_cpsgn */
  res_vf = vec_cpsgn(vf, vf);
// CHECK: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})
// CHECK-LE: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})

  res_vd = vec_cpsgn(vd, vd);
// CHECK: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})
// CHECK-LE: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})

  /* vec_div */
  res_vsll = vec_div(vsll, vsll);
// CHECK: sdiv <2 x i64>
// CHECK-LE: sdiv <2 x i64>

  res_vull = vec_div(vull, vull);
// CHECK: udiv <2 x i64>
// CHECK-LE: udiv <2 x i64>

  res_vf = vec_div(vf, vf);
// CHECK: fdiv <4 x float>
// CHECK-LE: fdiv <4 x float>

  res_vd = vec_div(vd, vd);
// CHECK: fdiv <2 x double>
// CHECK-LE: fdiv <2 x double>

  /* vec_max */
  res_vf = vec_max(vf, vf);
// CHECK: @llvm.ppc.vsx.xvmaxsp
// CHECK-LE: @llvm.ppc.vsx.xvmaxsp

  res_vd = vec_max(vd, vd);
// CHECK: @llvm.ppc.vsx.xvmaxdp
// CHECK-LE: @llvm.ppc.vsx.xvmaxdp

  res_vf = vec_vmaxfp(vf, vf);
// CHECK: @llvm.ppc.vsx.xvmaxsp
// CHECK-LE: @llvm.ppc.vsx.xvmaxsp

  /* vec_min */
  res_vf = vec_min(vf, vf);
// CHECK: @llvm.ppc.vsx.xvminsp
// CHECK-LE: @llvm.ppc.vsx.xvminsp

  res_vd = vec_min(vd, vd);
// CHECK: @llvm.ppc.vsx.xvmindp
// CHECK-LE: @llvm.ppc.vsx.xvmindp

  res_vf = vec_vminfp(vf, vf);
// CHECK: @llvm.ppc.vsx.xvminsp
// CHECK-LE: @llvm.ppc.vsx.xvminsp

  res_d = __builtin_vsx_xsmaxdp(d, d);
// CHECK: @llvm.ppc.vsx.xsmaxdp
// CHECK-LE: @llvm.ppc.vsx.xsmaxdp

  res_d = __builtin_vsx_xsmindp(d, d);
// CHECK: @llvm.ppc.vsx.xsmindp
// CHECK-LE: @llvm.ppc.vsx.xsmindp

  /* vec_perm */
  res_vsll = vec_perm(vsll, vsll, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_perm(vull, vull, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vbll = vec_perm(vbll, vbll, vuc);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vf = vec_round(vf);
// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float>
// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float>

  res_vd = vec_round(vd);
// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double>
// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double>

  res_vd = vec_perm(vd, vd, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vd = vec_splat(vd, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vbll = vec_splat(vbll, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vsll =  vec_splat(vsll, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vull =  vec_splat(vull, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vsi = vec_pack(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vui = vec_pack(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vbi = vec_pack(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_vperm(vsll, vsll, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_vperm(vull, vull, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vd = vec_vperm(vd, vd, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  /* vec_vsx_ld */

  res_vsi = vec_vsx_ld(0, &vsi);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vui = vec_vsx_ld(0, &vui);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vf = vec_vsx_ld (0, &vf);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vsll = vec_vsx_ld(0, &vsll);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vull = vec_vsx_ld(0, &vull);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vd = vec_vsx_ld(0, &vd);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vull = vec_vsx_ld(0, &vull);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vd = vec_vsx_ld(0, &vd);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vss = vec_vsx_ld(0, &vss);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vss = vec_vsx_ld(0, &ss);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vus = vec_vsx_ld(0, &vus);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vus = vec_vsx_ld(0, &us);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vbc = vec_vsx_ld(0, &vbc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vsc = vec_vsx_ld(0, &vsc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vuc = vec_vsx_ld(0, &vuc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vsc = vec_vsx_ld(0, &sc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vuc = vec_vsx_ld(0, &uc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  /* vec_vsx_st */

  vec_vsx_st(vsi, 0, &res_vsi);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsi, 0, &res_si);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vui, 0, &res_vui);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vui, 0, &res_ui);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vf, 0, &res_vf);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsll, 0, &res_vsll);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  vec_vsx_st(vull, 0, &res_vull);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  vec_vsx_st(vd, 0, &res_vd);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  vec_vsx_st(vss, 0, &res_vss);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vss, 0, &res_ss);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vus, 0, &res_vus);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vus, 0, &res_us);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsc, 0, &res_vsc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsc, 0, &res_sc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vuc, 0, &res_vuc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vuc, 0, &res_uc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbc, 0, &res_vbc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbc, 0, &res_sc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vbc, 0, &res_uc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  /* vec_and */
  res_vsll = vec_and(vsll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_and(vbll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_and(vsll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_and(vull, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_and(vbll, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_and(vull, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vbll = vec_and(vbll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  /* vec_vand */
  res_vsll = vec_vand(vsll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_vand(vbll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_vand(vsll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_vand(vull, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_vand(vbll, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_vand(vull, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vbll = vec_vand(vbll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  /* vec_andc */
  res_vsll = vec_andc(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_andc(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_andc(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_andc(vull, vull);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_andc(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_andc(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vbll = vec_andc(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vf = vec_floor(vf);
// CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_floor(vd);
// CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})

  res_vf = vec_madd(vf, vf, vf);
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})

  res_vd = vec_madd(vd, vd, vd);
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})

  /* vec_mergeh */
  res_vsll = vec_mergeh(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergeh(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergeh(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergeh(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergeh(vull, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergeh(vbll, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  /* vec_mergel */
  res_vsll = vec_mergel(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergel(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergel(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergel(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergel(vull, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergel(vbll, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  /* vec_msub */
  res_vf = vec_msub(vf, vf, vf);
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>

  res_vd = vec_msub(vd, vd, vd);
// CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>

  res_vsll = vec_mul(vsll, vsll);
// CHECK: mul <2 x i64>
// CHECK-LE: mul <2 x i64>

  res_vull = vec_mul(vull, vull);
// CHECK: mul <2 x i64>
// CHECK-LE: mul <2 x i64>

  res_vf = vec_mul(vf, vf);
// CHECK: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}

  res_vd = vec_mul(vd, vd);
// CHECK: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}

  res_vf = vec_nearbyint(vf);
// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_nearbyint(vd);
// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})

  res_vf = vec_nmadd(vf, vf, vf);
// CHECK: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
// CHECK-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]
// CHECK-LE: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
// CHECK-LE-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]

  res_vd = vec_nmadd(vd, vd, vd);
// CHECK: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
// CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
// CHECK-LE: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]

  res_vf = vec_nmsub(vf, vf, vf);
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}

  res_vd = vec_nmsub(vd, vd, vd);
// CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
// CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]

  /* vec_nor */
  res_vsll = vec_nor(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
// CHECK-LE: or <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_nor(vull, vull);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
// CHECK-LE: or <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_nor(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
// CHECK-LE: or <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vd = vec_nor(vd, vd);
// CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>
// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>

  /* vec_or */
  res_vsll = vec_or(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_or(vbll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_or(vsll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_or(vull, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_or(vbll, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_or(vull, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vbll = vec_or(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vd = vec_or(vd, vd);
// CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}

  res_vd = vec_or(vbll, vd);
// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>

  res_vd = vec_or(vd, vbll);
// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>

  res_vf = vec_re(vf);
// CHECK: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>
// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>

  res_vd = vec_re(vd);
// CHECK: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>

  res_vf = vec_rint(vf);
// CHECK: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_rint(vd);
// CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})

  res_vf = vec_rsqrte(vf);
// CHECK: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})

  res_vd = vec_rsqrte(vd);
// CHECK: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vf = vec_sel(vd, vd, vbll);
// CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64> %{{[0-9]+}},
// CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: or <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64> %{{[0-9]+}},
// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: or <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_sel(vd, vd, vull);
// CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64> %{{[0-9]+}},
// CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: or <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64> %{{[0-9]+}},
// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: or <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>

  res_vf = vec_sqrt(vf);
// CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_sqrt(vd);
// CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})

  res_vd = vec_sub(vd, vd);
// CHECK: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}

  res_vf = vec_trunc(vf);
// CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_trunc(vd);
// CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})

  /* vec_vor */
  res_vsll = vec_vor(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_vor(vbll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_vor(vsll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_vor(vull, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_vor(vbll, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_vor(vull, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vbll = vec_vor(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  /* vec_xor */
  res_vsll = vec_xor(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_xor(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_xor(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_xor(vull, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_xor(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_xor(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vbll = vec_xor(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_xor(vd, vd);
// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_xor(vd, vbll);
// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_xor(vbll, vd);
// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>

  /* vec_vxor */
  res_vsll = vec_vxor(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_vxor(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_vxor(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_vxor(vull, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_vxor(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_vxor(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vbll = vec_vxor(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_cts(vd, 0);
// CHECK: fmul <2 x double>
// CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
// CHECK-LE: fmul <2 x double>
// CHECK-LE: fptosi <2 x double> %{{.*}} to <2 x i64>

  res_vsll = vec_cts(vd, 31);
// CHECK: fmul <2 x double>
// CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
// CHECK-LE: fmul <2 x double>
// CHECK-LE: fptosi <2 x double> %{{.*}} to <2 x i64>

  res_vsll = vec_ctu(vd, 0);
// CHECK: fmul <2 x double>
// CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
// CHECK-LE: fmul <2 x double>
// CHECK-LE: fptoui <2 x double> %{{.*}} to <2 x i64>

  res_vsll = vec_ctu(vd, 31);
// CHECK: fmul <2 x double>
// CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
// CHECK-LE: fmul <2 x double>
// CHECK-LE: fptoui <2 x double> %{{.*}} to <2 x i64>

  res_vd = vec_ctf(vsll, 0);
// CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: fmul <2 x double>
// CHECK-LE: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>

  res_vd = vec_ctf(vsll, 31);
// CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: fmul <2 x double>
// CHECK-LE: sitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>

  res_vd = vec_ctf(vull, 0);
// CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: fmul <2 x double>
// CHECK-LE: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>

  res_vd = vec_ctf(vull, 31);
// CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK: fmul <2 x double>
// CHECK-LE: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>
}
Code Example #14
static void ProjectDlightTexture_altivec( void ) {
	int		i, l;
	vec_t	origin0, origin1, origin2;
	float   texCoords0, texCoords1;
	vector float floatColorVec0, floatColorVec1;
	vector float modulateVec, colorVec, zero;
	vector short colorShort;
	vector signed int colorInt;
	vector unsigned char floatColorVecPerm, modulatePerm, colorChar;
	vector unsigned char vSel = VECCONST_UINT8(0x00, 0x00, 0x00, 0xff,
                                               0x00, 0x00, 0x00, 0xff,
                                               0x00, 0x00, 0x00, 0xff,
                                               0x00, 0x00, 0x00, 0xff);
	float	*texCoords;
	byte	*colors;
	int		*intColors;
	byte	clipBits[SHADER_MAX_VERTEXES];
	float	texCoordsArray[SHADER_MAX_VERTEXES][2];
	byte	colorArray[SHADER_MAX_VERTEXES][4];
	glIndex_t	hitIndexes[SHADER_MAX_INDEXES];
	int		numIndexes;
	float	scale;
	float	radius;
	float	radiusInverseCubed;
	float	intensity, remainder;
	vec3_t	floatColor;
	float	modulate = 0.0f;
	qboolean vertexLight;

	if ( !backEnd.refdef.num_dlights ) {
		return;
	}

	// There has to be a better way to do this so that floatColor
	// and/or modulate are already 16-byte aligned.
	floatColorVecPerm = vec_lvsl(0,(float *)floatColor);
	modulatePerm = vec_lvsl(0,(float *)&modulate);
	modulatePerm = (vector unsigned char)vec_splat((vector unsigned int)modulatePerm,0);
	zero = (vector float)vec_splat_s8(0);

	for ( l = 0 ; l < backEnd.refdef.num_dlights ; l++ ) {
		dlight_t	*dl;

		if ( !( tess.dlightBits & ( 1 << l ) ) ) {
			continue;	// this surface definitely doesn't have any of this light
		}

		// clear colors
		Com_Memset( colorArray, 0, sizeof( colorArray ) );

		texCoords = texCoordsArray[0];
		colors = colorArray[0];

		dl = &backEnd.refdef.dlights[l];
		origin0 = dl->transformed[0];
		origin1 = dl->transformed[1];
		origin2 = dl->transformed[2];
		radius = dl->radius;
		scale = 1.0f / radius;
		radiusInverseCubed = dl->radiusInverseCubed;
		intensity = dl->intensity;

		vertexLight = ( ( dl->flags & REF_DIRECTED_DLIGHT ) || ( dl->flags & REF_VERTEX_DLIGHT ) );

		// directional lights have max intensity and washout remainder intensity
		if ( dl->flags & REF_DIRECTED_DLIGHT ) {
			remainder = intensity * 0.125;
		} else {
			remainder = 0.0f;
		}

		if(r_greyscale->integer)
		{
			float luminance;
			
			luminance = LUMA(dl->color[0], dl->color[1], dl->color[2]) * 255.0f;
			floatColor[0] = floatColor[1] = floatColor[2] = luminance;
		}
		else if(r_greyscale->value)
		{
			float luminance;
			
			luminance = LUMA(dl->color[0], dl->color[1], dl->color[2]) * 255.0f;
			floatColor[0] = LERP(dl->color[0] * 255.0f, luminance, r_greyscale->value);
			floatColor[1] = LERP(dl->color[1] * 255.0f, luminance, r_greyscale->value);
			floatColor[2] = LERP(dl->color[2] * 255.0f, luminance, r_greyscale->value);
		}
		else
		{
			floatColor[0] = dl->color[0] * 255.0f;
			floatColor[1] = dl->color[1] * 255.0f;
			floatColor[2] = dl->color[2] * 255.0f;
		}
		floatColorVec0 = vec_ld(0, floatColor);
		floatColorVec1 = vec_ld(11, floatColor);
		floatColorVec0 = vec_perm(floatColorVec0,floatColorVec0,floatColorVecPerm);
		for ( i = 0 ; i < tess.numVertexes ; i++, texCoords += 2, colors += 4 ) {
			int		clip = 0;
			vec_t dist0, dist1, dist2;
			
			dist0 = origin0 - tess.xyz[i][0];
			dist1 = origin1 - tess.xyz[i][1];
			dist2 = origin2 - tess.xyz[i][2];

			backEnd.pc.c_dlightVertexes++;

			// directional dlight, origin is a directional normal
			if ( dl->flags & REF_DIRECTED_DLIGHT ) {
				// twosided surfaces use absolute value of the calculated lighting
				modulate = intensity * DotProduct( dl->origin, tess.normal[ i ] );
				if ( tess.shader->cullType == CT_TWO_SIDED ) {
					modulate = fabs( modulate );
				}
				modulate += remainder;
			}
			// spherical vertex lit dlight
			else if ( dl->flags & REF_VERTEX_DLIGHT )
			{
				vec3_t	dir;

				dir[ 0 ] = radius - fabs( dist0 );
				if ( dir[ 0 ] <= 0.0f ) {
					continue;
				}
				dir[ 1 ] = radius - fabs( dist1 );
				if ( dir[ 1 ] <= 0.0f ) {
					continue;
				}
				dir[ 2 ] = radius - fabs( dist2 );
				if ( dir[ 2 ] <= 0.0f ) {
					continue;
				}

				modulate = intensity * dir[ 0 ] * dir[ 1 ] * dir[ 2 ] * radiusInverseCubed;
			}
			// vertical cylinder dlight
			else
			{
				texCoords0 = 0.5f + dist0 * scale;
				texCoords1 = 0.5f + dist1 * scale;

				if( !r_dlightBacks->integer &&
						// dist . tess.normal[i]
						( dist0 * tess.normal[i][0] +
						dist1 * tess.normal[i][1] +
						dist2 * tess.normal[i][2] ) < 0.0f ) {
					clip = 63;
				} else {
					if ( texCoords0 < 0.0f ) {
						clip |= 1;
					} else if ( texCoords0 > 1.0f ) {
						clip |= 2;
					}
					if ( texCoords1 < 0.0f ) {
						clip |= 4;
					} else if ( texCoords1 > 1.0f ) {
						clip |= 8;
					}
					texCoords[0] = texCoords0;
					texCoords[1] = texCoords1;

					// modulate the strength based on the height and color
					if ( dist2 > radius ) {
						clip |= 16;
						modulate = 0.0f;
					} else if ( dist2 < -radius ) {
						clip |= 32;
						modulate = 0.0f;
					} else {
						dist2 = Q_fabs(dist2);
						if ( dist2 < radius * 0.5f ) {
							modulate = intensity;
						} else {
							modulate = intensity * 2.0f * (radius - dist2) * scale;
						}
					}
				}
			}
			clipBits[i] = clip;

			// optimizations
			if ( vertexLight && modulate < ( 1.0f / 128.0f ) ) {
				continue;
			} else if ( modulate > 1.0f ) {
				modulate = 1.0f;
			}

			// ZTM: FIXME: should probably clamp to 0-255 range before converting to char,
			// but I don't know how to do altivec stuff or if it's even used anymore
			modulateVec = vec_ld(0,(float *)&modulate);
			modulateVec = vec_perm(modulateVec,modulateVec,modulatePerm);
			colorVec = vec_madd(floatColorVec0,modulateVec,zero);
			colorInt = vec_cts(colorVec,0);	// RGBx
			colorShort = vec_pack(colorInt,colorInt);		// RGBxRGBx
			colorChar = vec_packsu(colorShort,colorShort);	// RGBxRGBxRGBxRGBx
			colorChar = vec_sel(colorChar,vSel,vSel);		// RGBARGBARGBARGBA replace alpha with 255
			vec_ste((vector unsigned int)colorChar,0,(unsigned int *)colors);	// store color
		}

		// build a list of triangles that need light
		intColors = (int*) colorArray;
		numIndexes = 0;
		for ( i = 0 ; i < tess.numIndexes ; i += 3 ) {
			int		a, b, c;

			a = tess.indexes[i];
			b = tess.indexes[i+1];
			c = tess.indexes[i+2];
			if ( vertexLight ) {
				if ( !( intColors[ a ] | intColors[ b ] | intColors[ c ] ) ) {
					continue;
				}
			} else {
				if ( clipBits[a] & clipBits[b] & clipBits[c] ) {
					continue;	// not lighted
				}
			}
			hitIndexes[numIndexes] = a;
			hitIndexes[numIndexes+1] = b;
			hitIndexes[numIndexes+2] = c;
			numIndexes += 3;
		}

		if ( !numIndexes ) {
			continue;
		}

		if ( !vertexLight ) {
			qglEnableClientState( GL_TEXTURE_COORD_ARRAY );
			qglTexCoordPointer( 2, GL_FLOAT, 0, texCoordsArray[0] );
		} else {
			qglDisableClientState( GL_TEXTURE_COORD_ARRAY );
		}

		qglEnableClientState( GL_COLOR_ARRAY );
		qglColorPointer( 4, GL_UNSIGNED_BYTE, 0, colorArray );

		if ( dl->dlshader ) {
			shader_t *dls = dl->dlshader;

			for ( i = 0; i < dls->numUnfoggedPasses; i++ ) {
				shaderStage_t *stage = dls->stages[i];
				R_BindAnimatedImage( &dls->stages[i]->bundle[0] );
				GL_State( stage->stateBits | GLS_DEPTHFUNC_EQUAL );
				R_DrawElements( numIndexes, hitIndexes );
				backEnd.pc.c_totalIndexes += numIndexes;
				backEnd.pc.c_dlightIndexes += numIndexes;
			}
		} else {
			R_FogOff();
			if ( !vertexLight ) {
				GL_Bind( tr.dlightImage );
			} else {
				GL_Bind( tr.whiteImage );
			}
			// include GLS_DEPTHFUNC_EQUAL so alpha tested surfaces don't add light
			// where they aren't rendered
			if ( dl->flags & REF_ADDITIVE_DLIGHT ) {
				GL_State( GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE | GLS_DEPTHFUNC_EQUAL );
			}
			else {
				GL_State( GLS_SRCBLEND_DST_COLOR | GLS_DSTBLEND_ONE | GLS_DEPTHFUNC_EQUAL );
			}
			R_DrawElements( numIndexes, hitIndexes );
			backEnd.pc.c_totalIndexes += numIndexes;
			backEnd.pc.c_dlightIndexes += numIndexes;
			RB_FogOn();
		}
	}
}
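The vec_cts / vec_pack / vec_packsu / vec_sel sequence above is just a vectorized float-to-RGBA conversion. As a rough scalar sketch of what it computes per vertex (a hypothetical helper, not part of the renderer):

#include <stdint.h>

/* Scalar sketch of the color pack above: floatColor holds the dlight RGB
 * (assumed to be pre-scaled to the 0..255 range), modulate is the vertex
 * intensity already clamped to at most 1.0f. */
static void pack_dlight_color(const float floatColor[3], float modulate,
                              uint8_t colors[4])
{
    for (int i = 0; i < 3; i++) {
        int c = (int)(floatColor[i] * modulate); /* vec_madd + vec_cts (truncate) */
        if (c < 0)   c = 0;                      /* vec_packsu saturates ...      */
        if (c > 255) c = 255;                    /* ... to unsigned char          */
        colors[i] = (uint8_t)c;
    }
    colors[3] = 255;                             /* vec_sel with vSel forces alpha */
}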
コード例 #15
0
/* this code assumes that stride % 16 == 0 */
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                          ((x) * (8 - y)),
                          ((8 - x) * (y)),
                          ((x) * (y))};
    register int i;
    vec_u8_t fperm;
    const vec_s32_t vABCD = vec_ld(0, ABCD);
    const vec_s16_t vA = vec_splat((vec_s16_t)vABCD, 1);
    const vec_s16_t vB = vec_splat((vec_s16_t)vABCD, 3);
    const vec_s16_t vC = vec_splat((vec_s16_t)vABCD, 5);
    const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
    LOAD_ZERO;
    const vec_s16_t v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16_t v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8_t vsrc0uc, vsrc1uc;
    vec_s16_t vsrc0ssH, vsrc1ssH;
    vec_u8_t vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16_t vsrc2ssH, vsrc3ssH, psum;
    vec_u8_t vdst, ppsum, vfdst, fsum;

  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);

    if (((unsigned long)dst) % 16 == 0) {
      fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
                            0x14, 0x15, 0x16, 0x17,
                            0x08, 0x09, 0x0A, 0x0B,
                            0x0C, 0x0D, 0x0E, 0x0F);
    } else {
      fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
                            0x04, 0x05, 0x06, 0x07,
                            0x18, 0x19, 0x1A, 0x1B,
                            0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
      vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
      vsrc1uc = vsrcBuc;
    else
      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc0uc);
    vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
      for (i = 0 ; i < h ; i++) {


        vsrcCuc = vec_ld(stride + 0, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc2uc);
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sra(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vec_u8_t)vec_packsu(psum, psum);
        vfdst = vec_perm(vdst, ppsum, fperm);

        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    } else {
        vec_u8_t vsrcDuc;
      for (i = 0 ; i < h ; i++) {
        vsrcCuc = vec_ld(stride + 0, src);
        vsrcDuc = vec_ld(stride + 16, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
        if (reallyBadAlign)
          vsrc3uc = vsrcDuc;
        else
          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc2uc);
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sr(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vec_u8_t)vec_pack(psum, psum);
        vfdst = vec_perm(vdst, ppsum, fperm);

        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
}
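For reference, a plain scalar version of the bilinear chroma interpolation the loops above vectorize; this assumes the standard H.264 chroma formula with weights A..D and rounding via +32 >> 6 (OP_U8_ALTIVEC then either stores or averages the result, depending on the put/avg variant being compiled).

#include <stdint.h>

/* Scalar reference for the "put" variant of the 8-wide chroma MC above. */
static void chroma_mc8_ref(uint8_t *dst, const uint8_t *src,
                           int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++) {
            dst[j] = (uint8_t)((A * src[j] + B * src[j + 1] +
                                C * src[j + stride] + D * src[j + stride + 1] +
                                32) >> 6);
        }
        dst += stride;
        src += stride;
    }
}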
コード例 #16
0
static int dct_quantize_altivec(MpegEncContext* s,
                         DCTELEM* data, int n,
                         int qscale, int* overflow)
{
    int lastNonZero;
    vector float row0, row1, row2, row3, row4, row5, row6, row7;
    vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
    const vector float zero = (const vector float)FOUROF(0.);
    // used after quantize step
    int oldBaseValue = 0;

    // Load the data into the row/alt vectors
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_ld(0, data);
        data1 = vec_ld(16, data);
        data2 = vec_ld(32, data);
        data3 = vec_ld(48, data);
        data4 = vec_ld(64, data);
        data5 = vec_ld(80, data);
        data6 = vec_ld(96, data);
        data7 = vec_ld(112, data);

        // Transpose the data before we start
        TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);

        // load the data into floating point vectors.  We load
        // the high half of each row into the main row vectors
        // and the low half into the alt vectors.
        row0 = vec_ctf(vec_unpackh(data0), 0);
        alt0 = vec_ctf(vec_unpackl(data0), 0);
        row1 = vec_ctf(vec_unpackh(data1), 0);
        alt1 = vec_ctf(vec_unpackl(data1), 0);
        row2 = vec_ctf(vec_unpackh(data2), 0);
        alt2 = vec_ctf(vec_unpackl(data2), 0);
        row3 = vec_ctf(vec_unpackh(data3), 0);
        alt3 = vec_ctf(vec_unpackl(data3), 0);
        row4 = vec_ctf(vec_unpackh(data4), 0);
        alt4 = vec_ctf(vec_unpackl(data4), 0);
        row5 = vec_ctf(vec_unpackh(data5), 0);
        alt5 = vec_ctf(vec_unpackl(data5), 0);
        row6 = vec_ctf(vec_unpackh(data6), 0);
        alt6 = vec_ctf(vec_unpackl(data6), 0);
        row7 = vec_ctf(vec_unpackh(data7), 0);
        alt7 = vec_ctf(vec_unpackl(data7), 0);
    }

    // The following block could exist as a separate AltiVec DCT
    // function.  However, if we put it inline, the DCT data can remain
    // in the vector local variables, as floats, which we'll use during the
    // quantize step...
    {
        const vector float vec_0_298631336 = (vector float)FOUROF(0.298631336f);
        const vector float vec_0_390180644 = (vector float)FOUROF(-0.390180644f);
        const vector float vec_0_541196100 = (vector float)FOUROF(0.541196100f);
        const vector float vec_0_765366865 = (vector float)FOUROF(0.765366865f);
        const vector float vec_0_899976223 = (vector float)FOUROF(-0.899976223f);
        const vector float vec_1_175875602 = (vector float)FOUROF(1.175875602f);
        const vector float vec_1_501321110 = (vector float)FOUROF(1.501321110f);
        const vector float vec_1_847759065 = (vector float)FOUROF(-1.847759065f);
        const vector float vec_1_961570560 = (vector float)FOUROF(-1.961570560f);
        const vector float vec_2_053119869 = (vector float)FOUROF(2.053119869f);
        const vector float vec_2_562915447 = (vector float)FOUROF(-2.562915447f);
        const vector float vec_3_072711026 = (vector float)FOUROF(3.072711026f);


        int whichPass, whichHalf;

        for(whichPass = 1; whichPass<=2; whichPass++) {
            for(whichHalf = 1; whichHalf<=2; whichHalf++) {
                vector float tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
                vector float tmp10, tmp11, tmp12, tmp13;
                vector float z1, z2, z3, z4, z5;

                tmp0 = vec_add(row0, row7); // tmp0 = dataptr[0] + dataptr[7];
                tmp7 = vec_sub(row0, row7); // tmp7 = dataptr[0] - dataptr[7];
                tmp3 = vec_add(row3, row4); // tmp3 = dataptr[3] + dataptr[4];
                tmp4 = vec_sub(row3, row4); // tmp4 = dataptr[3] - dataptr[4];
                tmp1 = vec_add(row1, row6); // tmp1 = dataptr[1] + dataptr[6];
                tmp6 = vec_sub(row1, row6); // tmp6 = dataptr[1] - dataptr[6];
                tmp2 = vec_add(row2, row5); // tmp2 = dataptr[2] + dataptr[5];
                tmp5 = vec_sub(row2, row5); // tmp5 = dataptr[2] - dataptr[5];

                tmp10 = vec_add(tmp0, tmp3); // tmp10 = tmp0 + tmp3;
                tmp13 = vec_sub(tmp0, tmp3); // tmp13 = tmp0 - tmp3;
                tmp11 = vec_add(tmp1, tmp2); // tmp11 = tmp1 + tmp2;
                tmp12 = vec_sub(tmp1, tmp2); // tmp12 = tmp1 - tmp2;


                // dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS);
                row0 = vec_add(tmp10, tmp11);

                // dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);
                row4 = vec_sub(tmp10, tmp11);


                // z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
                z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero);

                // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
                //                                CONST_BITS-PASS1_BITS);
                row2 = vec_madd(tmp13, vec_0_765366865, z1);

                // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
                //                                CONST_BITS-PASS1_BITS);
                row6 = vec_madd(tmp12, vec_1_847759065, z1);

                z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7;
                z2 = vec_add(tmp5, tmp6); // z2 = tmp5 + tmp6;
                z3 = vec_add(tmp4, tmp6); // z3 = tmp4 + tmp6;
                z4 = vec_add(tmp5, tmp7); // z4 = tmp5 + tmp7;

                // z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
                z5 = vec_madd(vec_add(z3, z4), vec_1_175875602, (vector float)zero);

                // z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
                z3 = vec_madd(z3, vec_1_961570560, z5);

                // z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
                z4 = vec_madd(z4, vec_0_390180644, z5);

                // The following adds are rolled into the multiplies above
                // z3 = vec_add(z3, z5);  // z3 += z5;
                // z4 = vec_add(z4, z5);  // z4 += z5;

                // z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
                // Wow!  It's actually more efficient to roll this multiply
                // into the adds below, even though the multiply gets done twice!
                // z2 = vec_madd(z2, vec_2_562915447, (vector float)zero);

                // z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
                // Same with this one...
                // z1 = vec_madd(z1, vec_0_899976223, (vector float)zero);

                // tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
                // dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
                row7 = vec_madd(tmp4, vec_0_298631336, vec_madd(z1, vec_0_899976223, z3));

                // tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
                // dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);
                row5 = vec_madd(tmp5, vec_2_053119869, vec_madd(z2, vec_2_562915447, z4));

                // tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
                // dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
                row3 = vec_madd(tmp6, vec_3_072711026, vec_madd(z2, vec_2_562915447, z3));

                // tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
                // dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS);
                row1 = vec_madd(z1, vec_0_899976223, vec_madd(tmp7, vec_1_501321110, z4));

                // Swap the row values with the alts.  If this is the first half,
                // this sets up the low values to be acted on in the second half.
                // If this is the second half, it puts the high values back in
                // the row values where they are expected to be when we're done.
                SWAP(row0, alt0);
                SWAP(row1, alt1);
                SWAP(row2, alt2);
                SWAP(row3, alt3);
                SWAP(row4, alt4);
                SWAP(row5, alt5);
                SWAP(row6, alt6);
                SWAP(row7, alt7);
            }

            if (whichPass == 1) {
                // transpose the data for the second pass

                // First, block transpose the upper right with lower left.
                SWAP(row4, alt0);
                SWAP(row5, alt1);
                SWAP(row6, alt2);
                SWAP(row7, alt3);

                // Now, transpose each block of four
                TRANSPOSE4(row0, row1, row2, row3);
                TRANSPOSE4(row4, row5, row6, row7);
                TRANSPOSE4(alt0, alt1, alt2, alt3);
                TRANSPOSE4(alt4, alt5, alt6, alt7);
            }
        }
    }

    // perform the quantize step, using the floating point data
    // still in the row/alt registers
    {
        const int* biasAddr;
        const vector signed int* qmat;
        vector float bias, negBias;

        if (s->mb_intra) {
            vector signed int baseVector;

            // We must cache element 0 in the intra case
            // (it needs special handling).
            baseVector = vec_cts(vec_splat(row0, 0), 0);
            vec_ste(baseVector, 0, &oldBaseValue);

            qmat = (vector signed int*)s->q_intra_matrix[qscale];
            biasAddr = &(s->intra_quant_bias);
        } else {
            qmat = (vector signed int*)s->q_inter_matrix[qscale];
            biasAddr = &(s->inter_quant_bias);
        }

        // Load the bias vector (We add 0.5 to the bias so that we're
        // rounding when we convert to int, instead of flooring.)
        {
            vector signed int biasInt;
            const vector float negOneFloat = (vector float)FOUROF(-1.0f);
            LOAD4(biasInt, biasAddr);
            bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT);
            negBias = vec_madd(bias, negOneFloat, zero);
        }

        {
            vector float q0, q1, q2, q3, q4, q5, q6, q7;

            q0 = vec_ctf(qmat[0], QMAT_SHIFT);
            q1 = vec_ctf(qmat[2], QMAT_SHIFT);
            q2 = vec_ctf(qmat[4], QMAT_SHIFT);
            q3 = vec_ctf(qmat[6], QMAT_SHIFT);
            q4 = vec_ctf(qmat[8], QMAT_SHIFT);
            q5 = vec_ctf(qmat[10], QMAT_SHIFT);
            q6 = vec_ctf(qmat[12], QMAT_SHIFT);
            q7 = vec_ctf(qmat[14], QMAT_SHIFT);

            row0 = vec_sel(vec_madd(row0, q0, negBias), vec_madd(row0, q0, bias),
                    vec_cmpgt(row0, zero));
            row1 = vec_sel(vec_madd(row1, q1, negBias), vec_madd(row1, q1, bias),
                    vec_cmpgt(row1, zero));
            row2 = vec_sel(vec_madd(row2, q2, negBias), vec_madd(row2, q2, bias),
                    vec_cmpgt(row2, zero));
            row3 = vec_sel(vec_madd(row3, q3, negBias), vec_madd(row3, q3, bias),
                    vec_cmpgt(row3, zero));
            row4 = vec_sel(vec_madd(row4, q4, negBias), vec_madd(row4, q4, bias),
                    vec_cmpgt(row4, zero));
            row5 = vec_sel(vec_madd(row5, q5, negBias), vec_madd(row5, q5, bias),
                    vec_cmpgt(row5, zero));
            row6 = vec_sel(vec_madd(row6, q6, negBias), vec_madd(row6, q6, bias),
                    vec_cmpgt(row6, zero));
            row7 = vec_sel(vec_madd(row7, q7, negBias), vec_madd(row7, q7, bias),
                    vec_cmpgt(row7, zero));

            q0 = vec_ctf(qmat[1], QMAT_SHIFT);
            q1 = vec_ctf(qmat[3], QMAT_SHIFT);
            q2 = vec_ctf(qmat[5], QMAT_SHIFT);
            q3 = vec_ctf(qmat[7], QMAT_SHIFT);
            q4 = vec_ctf(qmat[9], QMAT_SHIFT);
            q5 = vec_ctf(qmat[11], QMAT_SHIFT);
            q6 = vec_ctf(qmat[13], QMAT_SHIFT);
            q7 = vec_ctf(qmat[15], QMAT_SHIFT);

            alt0 = vec_sel(vec_madd(alt0, q0, negBias), vec_madd(alt0, q0, bias),
                    vec_cmpgt(alt0, zero));
            alt1 = vec_sel(vec_madd(alt1, q1, negBias), vec_madd(alt1, q1, bias),
                    vec_cmpgt(alt1, zero));
            alt2 = vec_sel(vec_madd(alt2, q2, negBias), vec_madd(alt2, q2, bias),
                    vec_cmpgt(alt2, zero));
            alt3 = vec_sel(vec_madd(alt3, q3, negBias), vec_madd(alt3, q3, bias),
                    vec_cmpgt(alt3, zero));
            alt4 = vec_sel(vec_madd(alt4, q4, negBias), vec_madd(alt4, q4, bias),
                    vec_cmpgt(alt4, zero));
            alt5 = vec_sel(vec_madd(alt5, q5, negBias), vec_madd(alt5, q5, bias),
                    vec_cmpgt(alt5, zero));
            alt6 = vec_sel(vec_madd(alt6, q6, negBias), vec_madd(alt6, q6, bias),
                    vec_cmpgt(alt6, zero));
            alt7 = vec_sel(vec_madd(alt7, q7, negBias), vec_madd(alt7, q7, bias),
                    vec_cmpgt(alt7, zero));
        }


    }

    // Store the data back into the original block
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_pack(vec_cts(row0, 0), vec_cts(alt0, 0));
        data1 = vec_pack(vec_cts(row1, 0), vec_cts(alt1, 0));
        data2 = vec_pack(vec_cts(row2, 0), vec_cts(alt2, 0));
        data3 = vec_pack(vec_cts(row3, 0), vec_cts(alt3, 0));
        data4 = vec_pack(vec_cts(row4, 0), vec_cts(alt4, 0));
        data5 = vec_pack(vec_cts(row5, 0), vec_cts(alt5, 0));
        data6 = vec_pack(vec_cts(row6, 0), vec_cts(alt6, 0));
        data7 = vec_pack(vec_cts(row7, 0), vec_cts(alt7, 0));

        {
            // Clamp for overflow
            vector signed int max_q_int, min_q_int;
            vector signed short max_q, min_q;

            LOAD4(max_q_int, &(s->max_qcoeff));
            LOAD4(min_q_int, &(s->min_qcoeff));

            max_q = vec_pack(max_q_int, max_q_int);
            min_q = vec_pack(min_q_int, min_q_int);

            data0 = vec_max(vec_min(data0, max_q), min_q);
            data1 = vec_max(vec_min(data1, max_q), min_q);
            data2 = vec_max(vec_min(data2, max_q), min_q);
            data3 = vec_max(vec_min(data3, max_q), min_q);
            data4 = vec_max(vec_min(data4, max_q), min_q);
            data5 = vec_max(vec_min(data5, max_q), min_q);
            data6 = vec_max(vec_min(data6, max_q), min_q);
            data7 = vec_max(vec_min(data7, max_q), min_q);
        }

        {
        vector bool char zero_01, zero_23, zero_45, zero_67;
        vector signed char scanIndexes_01, scanIndexes_23, scanIndexes_45, scanIndexes_67;
        vector signed char negOne = vec_splat_s8(-1);
        vector signed char* scanPtr =
                (vector signed char*)(s->intra_scantable.inverse);
        signed char lastNonZeroChar;

        // Determine the largest non-zero index.
        zero_01 = vec_pack(vec_cmpeq(data0, (vector signed short)zero),
                vec_cmpeq(data1, (vector signed short)zero));
        zero_23 = vec_pack(vec_cmpeq(data2, (vector signed short)zero),
                vec_cmpeq(data3, (vector signed short)zero));
        zero_45 = vec_pack(vec_cmpeq(data4, (vector signed short)zero),
                vec_cmpeq(data5, (vector signed short)zero));
        zero_67 = vec_pack(vec_cmpeq(data6, (vector signed short)zero),
                vec_cmpeq(data7, (vector signed short)zero));

        // 64 biggest values
        scanIndexes_01 = vec_sel(scanPtr[0], negOne, zero_01);
        scanIndexes_23 = vec_sel(scanPtr[1], negOne, zero_23);
        scanIndexes_45 = vec_sel(scanPtr[2], negOne, zero_45);
        scanIndexes_67 = vec_sel(scanPtr[3], negOne, zero_67);

        // 32 largest values
        scanIndexes_01 = vec_max(scanIndexes_01, scanIndexes_23);
        scanIndexes_45 = vec_max(scanIndexes_45, scanIndexes_67);

        // 16 largest values
        scanIndexes_01 = vec_max(scanIndexes_01, scanIndexes_45);

        // 8 largest values
        scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne),
                vec_mergel(scanIndexes_01, negOne));

        // 4 largest values
        scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne),
                vec_mergel(scanIndexes_01, negOne));

        // 2 largest values
        scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne),
                vec_mergel(scanIndexes_01, negOne));

        // largest value
        scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne),
                vec_mergel(scanIndexes_01, negOne));

        scanIndexes_01 = vec_splat(scanIndexes_01, 0);


        vec_ste(scanIndexes_01, 0, &lastNonZeroChar);

        lastNonZero = lastNonZeroChar;

        // While the data is still in vectors we check for the transpose IDCT permute
        // and handle it using the vector unit if we can.  This is the permute used
        // by the altivec idct, so it is common when using the altivec dct.

        if ((lastNonZero > 0) && (s->dsp.idct_permutation_type == FF_TRANSPOSE_IDCT_PERM)) {
            TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);
        }

        vec_st(data0, 0, data);
        vec_st(data1, 16, data);
        vec_st(data2, 32, data);
        vec_st(data3, 48, data);
        vec_st(data4, 64, data);
        vec_st(data5, 80, data);
        vec_st(data6, 96, data);
        vec_st(data7, 112, data);
        }
    }

    // special handling of block[0]
    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                oldBaseValue /= s->y_dc_scale;
            else
                oldBaseValue /= s->c_dc_scale;
        }

        // Divide by 8, rounding the result
        data[0] = (oldBaseValue + 4) >> 3;
    }

    // We handled the transpose permutation above and we don't
    // need to permute the "no" permutation case.
    if ((lastNonZero > 0) &&
        (s->dsp.idct_permutation_type != FF_TRANSPOSE_IDCT_PERM) &&
        (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)) {
        ff_block_permute(data, s->dsp.idct_permutation,
                s->intra_scantable.scantable, lastNonZero);
    }

    return lastNonZero;
}
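The vec_cmpgt/vec_sel pairs in the quantize step choose between +bias and -bias depending on the sign of the coefficient, so the truncating float-to-int conversion rounds in the right direction for both signs. A scalar sketch of that idea (names are illustrative, not fields of MpegEncContext):

/* Scalar sketch of the per-coefficient biased quantization above.
 * q is the quantizer multiplier for this coefficient (already a float),
 * bias is the rounding bias described in the comment above. */
static short quantize_one(float coeff, float q, float bias)
{
    float v = (coeff > 0.0f) ? coeff * q + bias    /* vec_madd(row, q, bias)    */
                             : coeff * q - bias;   /* vec_madd(row, q, negBias) */
    return (short)v;                               /* vec_cts + vec_pack: truncate */
}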
コード例 #17
0
/* this code assumes that stride % 16 == 0 */
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
    signed int ABCD[4] __attribute__((aligned(16)));
    register int i;
    ABCD[0] = ((8 - x) * (8 - y));
    ABCD[1] = ((x) * (8 - y));
    ABCD[2] = ((8 - x) * (y));
    ABCD[3] = ((x) * (y));
    const vector signed int vABCD = vec_ld(0, ABCD);
    const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
    const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
    const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
    const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
    const vector signed int vzero = vec_splat_s32(0);
    const vector signed short v32ss = (const vector signed short)AVV(32);
    const vector unsigned short v6us = vec_splat_u16(6);

    vector unsigned char fperm;

    if (((unsigned long)dst) % 16 == 0) {
      fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                                        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
    } else {
      fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
    }

    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    
    vector unsigned char vsrcAuc;
    vector unsigned char vsrcBuc;
    vector unsigned char vsrcperm0;
    vector unsigned char vsrcperm1;
    vsrcAuc = vec_ld(0, src);
    if (loadSecond)
      vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
    
    vector unsigned char vsrc0uc;
    vector unsigned char vsrc1uc;
    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
      vsrc1uc = vsrcBuc;
    else
      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
    
    vector signed short vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc0uc);
    vector signed short vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
      for (i = 0 ; i < h ; i++) {
        vector unsigned char vsrcCuc;
        vsrcCuc = vec_ld(stride + 0, src);
        
        vector unsigned char vsrc2uc;
        vector unsigned char vsrc3uc;
        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
        
        vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
        vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
        
        vector signed short psum;
        
        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sra(psum, v6us);
        
        vector unsigned char vdst = vec_ld(0, dst);
        vector unsigned char ppsum = (vector unsigned char)vec_packsu(psum, psum);
        
        vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
        vector unsigned char fsum;
        
        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);
        
        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;
        
        dst += stride;
        src += stride;
      }
    } else {
      for (i = 0 ; i < h ; i++) {
        vector unsigned char vsrcCuc;
        vector unsigned char vsrcDuc;
        vsrcCuc = vec_ld(stride + 0, src);
        vsrcDuc = vec_ld(stride + 16, src);
        
        vector unsigned char vsrc2uc;
        vector unsigned char vsrc3uc;
        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
        if (reallyBadAlign)
          vsrc3uc = vsrcDuc;
        else
          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
        
        vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
        vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
        
        vector signed short psum;
      
        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v32ss, psum);
        psum = vec_sr(psum, v6us);
        
        vector unsigned char vdst = vec_ld(0, dst);
        vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum); 
        
        vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
        vector unsigned char fsum;
        
        OP_U8_ALTIVEC(fsum, vfdst, vdst);

        vec_st(fsum, 0, dst);
        
        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;
        
        dst += stride;
        src += stride;
      }
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
}
コード例 #18
0
ファイル: p8vector-builtin-3.c プロジェクト: 0day-ci/gcc
vs_sign vs_pack_2 (vi_sign a, vi_sign b)
{
  return vec_pack (a, b);
}
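vec_pack with two signed-int arguments, as in this test, is a pure truncating pack: each 32-bit element keeps only its low 16 bits, and the two inputs are concatenated (saturating packs are the separate vec_packs/vec_packsu builtins). A scalar model, assuming vi_sign and vs_sign are plain vector signed int / vector signed short typedefs:

#include <stdint.h>

/* Scalar model of vec_pack(vector signed int a, vector signed int b):
 * truncate each lane to 16 bits; a fills the first four output lanes and
 * b the last four in big-endian element order (little endian mirrors this). */
static void pack_s32_to_s16(const int32_t a[4], const int32_t b[4],
                            int16_t out[8])
{
    for (int i = 0; i < 4; i++) {
        out[i]     = (int16_t)a[i];  /* truncating, not saturating */
        out[i + 4] = (int16_t)b[i];
    }
}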
コード例 #19
0
ファイル: gmc_altivec.c プロジェクト: venkatarajasekhar/Qt
/* AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8
 * to preserve proper dst alignment. */
void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
                     int stride, int h, int x16, int y16, int rounder)
{
    int i;
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16 - x16) * (16 - y16), /* A */
             (x16) * (16 - y16), /* B */
        (16 - x16) * (y16),      /* C */
             (x16) * (y16),      /* D */
        0, 0, 0, 0               /* padding */
    };
    register const vector unsigned char vczero =
        (const vector unsigned char) vec_splat_u8(0);
    register const vector unsigned short vcsr8 =
        (const vector unsigned short) vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, srcvB, srcvC, srcvD;
    register vector unsigned short tempB, tempC, tempD;
    unsigned long dst_odd        = (unsigned long) dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;
    register vector unsigned short tempA =
        vec_ld(0, (const unsigned short *) ABCD);
    register vector unsigned short Av = vec_splat(tempA, 0);
    register vector unsigned short Bv = vec_splat(tempA, 1);
    register vector unsigned short Cv = vec_splat(tempA, 2);
    register vector unsigned short Dv = vec_splat(tempA, 3);
    register vector unsigned short rounderV =
        vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);

    /* we'll be able to pick up our 9 char elements at src from those
     * 32 bytes. We load the first batch here, as inside the loop we can
     * reuse 'src + stride' from one iteration as the 'src' of the next. */
    register vector unsigned char src_0 = vec_ld(0, src);
    register vector unsigned char src_1 = vec_ld(16, src);
    register vector unsigned char srcvA = vec_perm(src_0, src_1,
                                                   vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
        /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
         * on the second vector. */
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    else
        srcvB = src_1;
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for (i = 0; i < h; i++) {
        dst_odd        =   (unsigned long) dst            & 0x0000000F;
        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        /* We'll be able to pick up our 9 char elements at src + stride from
         * those 32 bytes, then reuse the resulting 2 vectors srcvC and srcvD
         * as the next srcvA and srcvB. */
        src_0 = vec_ld(stride +  0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F)
            /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
             * on the second vector. */
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        else
            srcvD = src_1;

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        /* OK, now we (finally) do the math :-)
         * Those four instructions replace 32 int muls & 32 int adds.
         * Isn't AltiVec nice? */
        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);

        if (dst_odd)
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        else
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
コード例 #20
0
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
    const DECLARE_ALIGNED_16(unsigned short, rounder_a[8]) =
      {rounder, rounder, rounder, rounder,
       rounder, rounder, rounder, rounder};
    const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) =
      {
        (16-x16)*(16-y16), /* A */
        (   x16)*(16-y16), /* B */
        (16-x16)*(   y16), /* C */
        (   x16)*(   y16), /* D */
        0, 0, 0, 0         /* padding */
      };
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vcsr8 = (const_vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;


POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_ld(0, (unsigned short*)rounder_a);

    // we'll be able to pick up our 9 char elements at src
    // from those 32 bytes. We load the first batch here,
    // as inside the loop we can re-use 'src+stride' from
    // one iteration as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
    { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
      srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    }
    else
    {
      srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for(i=0; i<h; i++)
    {
      dst_odd = (unsigned long)dst & 0x0000000F;
      src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

      dstv = vec_ld(0, dst);

      // we'll be able to pick up our 9 char elements
      // at src + stride from those 32 bytes, then reuse
      // the resulting 2 vectors srcvC and srcvD
      // as the next srcvA and srcvB
      src_0 = vec_ld(stride + 0, src);
      src_1 = vec_ld(stride + 16, src);
      srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

      if (src_really_odd != 0x0000000F)
      { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
        srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
      }
      else
      {
        srcvD = src_1;
      }

      srcvC = vec_mergeh(vczero, srcvC);
      srcvD = vec_mergeh(vczero, srcvD);


      // OK, now we (finally) do the math :-)
      // those four instructions replace 32 int muls & 32 int adds.
      // isn't AltiVec nice?
      tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
      tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
      tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
      tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

      srcvA = srcvC;
      srcvB = srcvD;

      tempD = vec_sr(tempD, vcsr8);

      dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

      if (dst_odd)
      {
        dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
      }
      else
      {
        dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
      }

      vec_st(dstv2, 0, dst);

      dst += stride;
      src += stride;
    }

POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}
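Both gmc1 variants above compute the same 1/16-pel weighted average; a scalar reference, assumed from the A..D weights and the final >> 8 (rounder supplies the rounding constant added before the shift):

#include <stdint.h>

/* Scalar reference for the 8-pixel-wide gmc1 kernels above. */
static void gmc1_ref(uint8_t *dst, const uint8_t *src, int stride, int h,
                     int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = x16 * (16 - y16);
    const int C = (16 - x16) * y16;
    const int D = x16 * y16;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++) {
            dst[j] = (uint8_t)((A * src[j] + B * src[j + 1] +
                                C * src[j + stride] + D * src[j + stride + 1] +
                                rounder) >> 8);
        }
        dst += stride;
        src += stride;
    }
}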
コード例 #21
0
ファイル: pack-be-order.c プロジェクト: suseecho/gcc
static void test()
{
  /* Input vectors.  */
  vector unsigned short vusa = {0,1,2,3,4,5,6,7};
  vector unsigned short vusb = {8,9,10,11,12,13,14,15};
  vector signed short vssa = {-8,-7,-6,-5,-4,-3,-2,-1};
  vector signed short vssb = {0,1,2,3,4,5,6,7};
  vector bool short vbsa = {0,65535,65535,0,0,0,65535,0};
  vector bool short vbsb = {65535,0,0,65535,65535,65535,0,65535};
  vector unsigned int vuia = {0,1,2,3};
  vector unsigned int vuib = {4,5,6,7};
  vector signed int vsia = {-4,-3,-2,-1};
  vector signed int vsib = {0,1,2,3};
  vector bool int vbia = {0,BIG,BIG,BIG};
  vector bool int vbib = {BIG,0,0,0};
  vector unsigned int vipa = {(0<<24) + (2<<19) + (3<<11) + (4<<3),
			      (1<<24) + (5<<19) + (6<<11) + (7<<3),
			      (0<<24) + (8<<19) + (9<<11) + (10<<3),
			      (1<<24) + (11<<19) + (12<<11) + (13<<3)};
  vector unsigned int vipb = {(1<<24) + (14<<19) + (15<<11) + (16<<3),
			      (0<<24) + (17<<19) + (18<<11) + (19<<3),
			      (1<<24) + (20<<19) + (21<<11) + (22<<3),
			      (0<<24) + (23<<19) + (24<<11) + (25<<3)};
  vector unsigned short vusc = {0,256,1,257,2,258,3,259};
  vector unsigned short vusd = {4,260,5,261,6,262,7,263};
  vector signed short vssc = {-1,-128,0,127,-2,-129,1,128};
  vector signed short vssd = {-3,-130,2,129,-4,-131,3,130};
  vector unsigned int vuic = {0,65536,1,65537};
  vector unsigned int vuid = {2,65538,3,65539};
  vector signed int vsic = {-1,-32768,0,32767};
  vector signed int vsid = {-2,-32769,1,32768};

  /* Result vectors.  */
  vector unsigned char vucr;
  vector signed char vscr;
  vector bool char vbcr;
  vector unsigned short vusr;
  vector signed short vssr;
  vector bool short vbsr;
  vector pixel vpr;
  vector unsigned char vucsr;
  vector signed char vscsr;
  vector unsigned short vussr;
  vector signed short vsssr;
  vector unsigned char vucsur1, vucsur2;
  vector unsigned short vussur1, vussur2;

  /* Expected result vectors.  */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  vector unsigned char vucer = {8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7};
  vector signed char vscer = {0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1};
  vector bool char vbcer = {255,0,0,255,255,255,0,255,0,255,255,0,0,0,255,0};
  vector unsigned short vuser = {4,5,6,7,0,1,2,3};
  vector signed short vsser = {0,1,2,3,-4,-3,-2,-1};
  vector bool short vbser = {65535,0,0,0,0,65535,65535,65535};
  vector pixel vper = {(1<<15) + (14<<10) + (15<<5) + 16,
		       (0<<15) + (17<<10) + (18<<5) + 19,
		       (1<<15) + (20<<10) + (21<<5) + 22,
		       (0<<15) + (23<<10) + (24<<5) + 25,
		       (0<<15) + (2<<10) + (3<<5) + 4,
		       (1<<15) + (5<<10) + (6<<5) + 7,
		       (0<<15) + (8<<10) + (9<<5) + 10,
		       (1<<15) + (11<<10) + (12<<5) + 13};
  vector unsigned char vucser = {4,255,5,255,6,255,7,255,0,255,1,255,2,255,3,255};
  vector signed char vscser = {-3,-128,2,127,-4,-128,3,127,
			       -1,-128,0,127,-2,-128,1,127};
  vector unsigned short vusser = {2,65535,3,65535,0,65535,1,65535};
  vector signed short vssser = {-2,-32768,1,32767,-1,-32768,0,32767};
  vector unsigned char vucsuer1 = {4,255,5,255,6,255,7,255,0,255,1,255,2,255,3,255};
  vector unsigned char vucsuer2 = {0,0,2,129,0,0,3,130,0,0,0,127,0,0,1,128};
  vector unsigned short vussuer1 = {2,65535,3,65535,0,65535,1,65535};
  vector unsigned short vussuer2 = {0,0,1,32768,0,0,0,32767};
#else
  vector unsigned char vucer = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
  vector signed char vscer = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
  vector bool char vbcer = {0,255,255,0,0,0,255,0,255,0,0,255,255,255,0,255};
  vector unsigned short vuser = {0,1,2,3,4,5,6,7};
  vector signed short vsser = {-4,-3,-2,-1,0,1,2,3};
  vector bool short vbser = {0,65535,65535,65535,65535,0,0,0};
  vector pixel vper = {(0<<15) + (2<<10) + (3<<5) + 4,
		       (1<<15) + (5<<10) + (6<<5) + 7,
		       (0<<15) + (8<<10) + (9<<5) + 10,
		       (1<<15) + (11<<10) + (12<<5) + 13,
		       (1<<15) + (14<<10) + (15<<5) + 16,
		       (0<<15) + (17<<10) + (18<<5) + 19,
		       (1<<15) + (20<<10) + (21<<5) + 22,
		       (0<<15) + (23<<10) + (24<<5) + 25};
  vector unsigned char vucser = {0,255,1,255,2,255,3,255,4,255,5,255,6,255,7,255};
  vector signed char vscser = {-1,-128,0,127,-2,-128,1,127,
			       -3,-128,2,127,-4,-128,3,127};
  vector unsigned short vusser = {0,65535,1,65535,2,65535,3,65535};
  vector signed short vssser = {-1,-32768,0,32767,-2,-32768,1,32767};
  vector unsigned char vucsuer1 = {0,255,1,255,2,255,3,255,4,255,5,255,6,255,7,255};
  vector unsigned char vucsuer2 = {0,0,0,127,0,0,1,128,0,0,2,129,0,0,3,130};
  vector unsigned short vussuer1 = {0,65535,1,65535,2,65535,3,65535};
  vector unsigned short vussuer2 = {0,0,0,32767,0,0,1,32768};
#endif

  vucr = vec_pack (vusa, vusb);
  vscr = vec_pack (vssa, vssb);
  vbcr = vec_pack (vbsa, vbsb);
  vusr = vec_pack (vuia, vuib);
  vssr = vec_pack (vsia, vsib);
  vbsr = vec_pack (vbia, vbib);
  vpr  = vec_packpx (vipa, vipb);
  vucsr = vec_packs (vusc, vusd);
  vscsr = vec_packs (vssc, vssd);
  vussr = vec_packs (vuic, vuid);
  vsssr = vec_packs (vsic, vsid);
  vucsur1 = vec_packsu (vusc, vusd);
  vucsur2 = vec_packsu (vssc, vssd);
  vussur1 = vec_packsu (vuic, vuid);
  vussur2 = vec_packsu (vsic, vsid);

  check (vec_all_eq (vucr, vucer), "vucr");
  check (vec_all_eq (vscr, vscer), "vscr");
  check (vec_all_eq (vbcr, vbcer), "vbcr");
  check (vec_all_eq (vusr, vuser), "vusr");
  check (vec_all_eq (vssr, vsser), "vssr");
  check (vec_all_eq (vbsr, vbser), "vbsr");
  check (vec_all_eq (vpr,  vper ), "vpr" );
  check (vec_all_eq (vucsr, vucser), "vucsr");
  check (vec_all_eq (vscsr, vscser), "vscsr");
  check (vec_all_eq (vussr, vusser), "vussr");
  check (vec_all_eq (vsssr, vssser), "vsssr");
  check (vec_all_eq (vucsur1, vucsuer1), "vucsur1");
  check (vec_all_eq (vucsur2, vucsuer2), "vucsur2");
  check (vec_all_eq (vussur1, vussuer1), "vussur1");
  check (vec_all_eq (vussur2, vussuer2), "vussur2");
}
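The expected vper values come from what vec_packpx does to each 32-bit element: it keeps the least significant bit of the most significant byte and the top five bits of each remaining byte, producing a 1:5:5:5 pixel. A scalar sketch of that per-element conversion (my paraphrase of the builtin, not code from the test):

#include <stdint.h>

/* One element of vec_packpx: 32-bit word -> 1:5:5:5 pixel.
 * Bit 0 of byte 0 (the most significant byte) becomes pixel bit 15;
 * the top 5 bits of bytes 1, 2 and 3 become the three 5-bit channels. */
static uint16_t packpx_one(uint32_t w)
{
    uint16_t p = (uint16_t)(((w >> 24) & 0x01u) << 15);
    p |= (uint16_t)(((w >> 16) & 0xF8u) << 7);  /* -> bits 14..10 */
    p |= (uint16_t)(((w >>  8) & 0xF8u) << 2);  /* -> bits  9..5  */
    p |= (uint16_t)(((w      ) & 0xF8u) >> 3);  /* -> bits  4..0  */
    return p;
}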
コード例 #22
0
 SIMD_INLINE void BgraToBayer(const Loader<align> & bgra, const v128_u8 perm[4][2], Storer<align> & bayer)
 {
     const v128_u16 lo = (v128_u16)vec_perm(Load<align, first>(bgra), Load<align, false>(bgra), perm[format][row]);
     const v128_u16 hi = (v128_u16)vec_perm(Load<align, false>(bgra), Load<align, false>(bgra), perm[format][row]);
     Store<align, first>(bayer, vec_pack(lo, hi));
 }
コード例 #23
0
ファイル: dsputil_altivec.c プロジェクト: 0xFFeng/ffmpeg
static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char avgv, t5;
    vector unsigned char perm1 = vec_lvsl(0, pix2);
    vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /* Because pix3 = pix2 + line_size, the pix3 of one iteration becomes
       pix2 in the next iteration. We can use this fact to avoid a
       potentially expensive unaligned read, as well as some splitting and
       vector addition each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts */
    pix2l  = vec_ld( 0, pix2);
    pix2r  = vec_ld(16, pix2);
    pix2v  = vec_perm(pix2l, pix2r, perm1);
    pix2iv = vec_perm(pix2l, pix2r, perm2);

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16] */
        pix1v = vec_ld(0, pix1);

        pix2l  = vec_ld( 0, pix3);
        pix2r  = vec_ld(16, pix3);
        pix3v  = vec_perm(pix2l, pix2r, perm1);
        pix3iv = vec_perm(pix2l, pix2r, perm2);

        /* Note that AltiVec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a,b),avg(c,d)), but the rounding
           would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
           Instead, we have to split the pixel vectors into vectors of shorts,
           and do the averaging by hand. */

        /* Split the pixel vectors into shorts */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
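In scalar form, the (½, ½)-pel SAD being vectorized here averages each 2x2 neighbourhood of pix2 with rounding and accumulates absolute differences against pix1; a reference sketch, assumed from the comments rather than taken from dsputil:

#include <stdint.h>
#include <stdlib.h>

/* Scalar reference for sad16_xy2: rounded half-pel average in both
 * directions, then sum of absolute differences over a 16 x h block. */
static int sad16_xy2_ref(const uint8_t *pix1, const uint8_t *pix2,
                         int line_size, int h)
{
    int s = 0;
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 16; j++) {
            int avg = (pix2[j] + pix2[j + 1] +
                       pix2[j + line_size] + pix2[j + line_size + 1] + 2) >> 2;
            s += abs(pix1[j] - avg);
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}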
コード例 #24
0
ファイル: p8vector-builtin-3.c プロジェクト: 0day-ci/gcc
vc_sign vc_pack_2 (vs_sign a, vs_sign b)
{
  return vec_pack (a, b);
}
コード例 #25
0
ファイル: vc1dsp_altivec.c プロジェクト: Acidburn0zzz/libav
/** Do inverse transform on 8x8 block
*/
static void vc1_inv_trans_8x8_altivec(int16_t block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector  signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector  signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,  0, block);
    vec_st(src1, 16, block);
    vec_st(src2, 32, block);
    vec_st(src3, 48, block);
    vec_st(src4, 64, block);
    vec_st(src5, 80, block);
    vec_st(src6, 96, block);
    vec_st(src7,112, block);
}
コード例 #26
0
ファイル: p8vector-builtin-3.c プロジェクト: 0day-ci/gcc
vi_sign vi_pack_2 (vll_sign a, vll_sign b)
{
  return vec_pack (a, b);
}
コード例 #27
0
ファイル: h264_altivec.c プロジェクト: BOTCrusher/sagetv
/* this code assumes that stride % 16 == 0 */
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
    signed int ABCD[4] __attribute__((aligned(16))) =
                        {((8 - x) * (8 - y)),
                          ((x) * (8 - y)),
                          ((8 - x) * (y)),
                          ((x) * (y))};
    register int i;
    vector unsigned char fperm;
    const vector signed int vABCD = vec_ld(0, ABCD);
    const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
    const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
    const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
    const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
    const vector signed int vzero = vec_splat_s32(0);
    const vector signed short v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vector unsigned short v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vector unsigned char vsrc0uc, vsrc1uc;
    vector signed short vsrc0ssH, vsrc1ssH;
    vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc;
    vector signed short vsrc2ssH, vsrc3ssH, psum;
    vector unsigned char vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
      fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
                                        0x14, 0x15, 0x16, 0x17,
                                        0x08, 0x09, 0x0A, 0x0B,
                                        0x0C, 0x0D, 0x0E, 0x0F);
    } else {
      fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
                                        0x04, 0x05, 0x06, 0x07,
                                        0x18, 0x19, 0x1A, 0x1B,
                                        0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
      vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
      vsrc1uc = vsrcBuc;
    else
      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                               (vector unsigned char)vsrc0uc);
    vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                               (vector unsigned char)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
      for (i = 0 ; i < h ; i++) {


        vsrcCuc = vec_ld(stride + 0, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

        vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                (vector unsigned char)vsrc2uc);
        vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                (vector unsigned char)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v28ss, psum);
        psum = vec_sra(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vector unsigned char)vec_packsu(psum, psum);
        fsum = vec_perm(vdst, ppsum, fperm);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    } else {
        vector unsigned char vsrcDuc;
      for (i = 0 ; i < h ; i++) {
        vsrcCuc = vec_ld(stride + 0, src);
        vsrcDuc = vec_ld(stride + 16, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
        if (reallyBadAlign)
          vsrc3uc = vsrcDuc;
        else
          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

        vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                (vector unsigned char)vsrc2uc);
        vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
                                                (vector unsigned char)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v28ss, psum);
        psum = vec_sr(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vector unsigned char)vec_pack(psum, psum);
        fsum = vec_perm(vdst, ppsum, fperm);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    }
}
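The only functional difference from the rounding chroma MC earlier in this collection is the constant: v28ss evaluates to (1 << 5) - 4 = 28, so each pixel is biased by 28 instead of 32 before the shift. Per pixel, that is (an assumption based on the constants in this function, not text from the file):

/* Per-pixel form of the no-rounding chroma interpolation above: same A..D
 * weights as the rounding version, but +28 instead of +32 before >> 6. */
static inline int chroma_no_rnd_one(int a, int b, int c, int d,
                                    int A, int B, int C, int D)
{
    return (A * a + B * b + C * c + D * d + 28) >> 6;
}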
コード例 #28
0
ファイル: vc1dsp_altivec.c プロジェクト: Acidburn0zzz/libav
/** Do inverse transform on 8x4 part of block
*/
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector  signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
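    /* vec_perm() only uses the low five bits of each control byte, so the
     * 0xff entries merged in here select bytes from the second operand of
     * the permute (a zero vector in the ADD() macro below).  perm0/perm1
     * therefore both realign the possibly unaligned destination load and
     * zero-extend its eight pixel bytes to signed shorts in a single
     * vec_perm(). */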

#define ADD(dest,src,perm)                                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm);  \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);
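
/* ADD() loads the aligned 16 bytes around dest, zero-extends the eight
 * destination pixels to shorts with the permute built above, adds the
 * transform output with signed saturation, packs back to unsigned bytes and
 * writes the eight result bytes as two 32-bit element stores (vec_ste),
 * leaving the neighbouring bytes of the 16-byte line untouched. */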

    ADD (dest, src0, perm0)      dest += stride;
    ADD (dest, src1, perm1)      dest += stride;
    ADD (dest, src2, perm0)      dest += stride;
    ADD (dest, src3, perm1)
}
Code example #29
File: tr_shade.c Project: ptitSeb/ioq3
static void ProjectDlightTexture_altivec( void ) {
	int		i, l;
	vec_t	origin0, origin1, origin2;
	float   texCoords0, texCoords1;
	vector float floatColorVec0, floatColorVec1;
	vector float modulateVec, colorVec, zero;
	vector short colorShort;
	vector signed int colorInt;
	vector unsigned char floatColorVecPerm, modulatePerm, colorChar;
	vector unsigned char vSel = VECCONST_UINT8(0x00, 0x00, 0x00, 0xff,
                                               0x00, 0x00, 0x00, 0xff,
                                               0x00, 0x00, 0x00, 0xff,
                                               0x00, 0x00, 0x00, 0xff);
	float	*texCoords;
	byte	*colors;
	byte	clipBits[SHADER_MAX_VERTEXES];
	float	texCoordsArray[SHADER_MAX_VERTEXES][2];
	byte	colorArray[SHADER_MAX_VERTEXES][4];
	unsigned	hitIndexes[SHADER_MAX_INDEXES];
	int		numIndexes;
	float	scale;
	float	radius;
	vec3_t	floatColor;
	float	modulate = 0.0f;

	if ( !backEnd.refdef.num_dlights ) {
		return;
	}

	// There has to be a better way to do this so that floatColor
	// and/or modulate are already 16-byte aligned.
	floatColorVecPerm = vec_lvsl(0,(float *)floatColor);
	modulatePerm = vec_lvsl(0,(float *)&modulate);
	modulatePerm = (vector unsigned char)vec_splat((vector unsigned int)modulatePerm,0);
	zero = (vector float)vec_splat_s8(0);
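	// The vec_lvsl()-based permutes allow unaligned scalars to be read with
	// aligned vector loads: floatColorVecPerm rotates the loaded floatColor
	// bytes down to lane 0, and modulatePerm (splatted across all lanes)
	// makes a single vec_perm broadcast the float `modulate` to every lane,
	// whatever its stack address happens to be.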

	for ( l = 0 ; l < backEnd.refdef.num_dlights ; l++ ) {
		dlight_t	*dl;

		if ( !( tess.dlightBits & ( 1 << l ) ) ) {
			continue;	// this surface definitely doesn't have any of this light
		}
		texCoords = texCoordsArray[0];
		colors = colorArray[0];

		dl = &backEnd.refdef.dlights[l];
		origin0 = dl->transformed[0];
		origin1 = dl->transformed[1];
		origin2 = dl->transformed[2];
		radius = dl->radius;
		scale = 1.0f / radius;

		if(r_greyscale->integer)
		{
			float luminance;
			
			luminance = LUMA(dl->color[0], dl->color[1], dl->color[2]) * 255.0f;
			floatColor[0] = floatColor[1] = floatColor[2] = luminance;
		}
		else if(r_greyscale->value)
		{
			float luminance;
			
			luminance = LUMA(dl->color[0], dl->color[1], dl->color[2]) * 255.0f;
			floatColor[0] = LERP(dl->color[0] * 255.0f, luminance, r_greyscale->value);
			floatColor[1] = LERP(dl->color[1] * 255.0f, luminance, r_greyscale->value);
			floatColor[2] = LERP(dl->color[2] * 255.0f, luminance, r_greyscale->value);
		}
		else
		{
			floatColor[0] = dl->color[0] * 255.0f;
			floatColor[1] = dl->color[1] * 255.0f;
			floatColor[2] = dl->color[2] * 255.0f;
		}
		floatColorVec0 = vec_ld(0, floatColor);
		floatColorVec1 = vec_ld(11, floatColor);
		floatColorVec0 = vec_perm(floatColorVec0,floatColorVec0,floatColorVecPerm);
		for ( i = 0 ; i < tess.numVertexes ; i++, texCoords += 2, colors += 4 ) {
			int		clip = 0;
			vec_t dist0, dist1, dist2;
			
			dist0 = origin0 - tess.xyz[i][0];
			dist1 = origin1 - tess.xyz[i][1];
			dist2 = origin2 - tess.xyz[i][2];

			backEnd.pc.c_dlightVertexes++;

			texCoords0 = 0.5f + dist0 * scale;
			texCoords1 = 0.5f + dist1 * scale;

			if( !r_dlightBacks->integer &&
					// dist . tess.normal[i]
					( dist0 * tess.normal[i][0] +
					dist1 * tess.normal[i][1] +
					dist2 * tess.normal[i][2] ) < 0.0f ) {
				clip = 63;
			} else {
				if ( texCoords0 < 0.0f ) {
					clip |= 1;
				} else if ( texCoords0 > 1.0f ) {
					clip |= 2;
				}
				if ( texCoords1 < 0.0f ) {
					clip |= 4;
				} else if ( texCoords1 > 1.0f ) {
					clip |= 8;
				}
				texCoords[0] = texCoords0;
				texCoords[1] = texCoords1;

				// modulate the strength based on the height and color
				if ( dist2 > radius ) {
					clip |= 16;
					modulate = 0.0f;
				} else if ( dist2 < -radius ) {
					clip |= 32;
					modulate = 0.0f;
				} else {
					dist2 = Q_fabs(dist2);
					if ( dist2 < radius * 0.5f ) {
						modulate = 1.0f;
					} else {
						modulate = 2.0f * (radius - dist2) * scale;
					}
				}
			}
			clipBits[i] = clip;

			modulateVec = vec_ld(0,(float *)&modulate);
			modulateVec = vec_perm(modulateVec,modulateVec,modulatePerm);
			colorVec = vec_madd(floatColorVec0,modulateVec,zero);
			colorInt = vec_cts(colorVec,0);	// RGBx
			colorShort = vec_pack(colorInt,colorInt);		// RGBxRGBx
			colorChar = vec_packsu(colorShort,colorShort);	// RGBxRGBxRGBxRGBx
			colorChar = vec_sel(colorChar,vSel,vSel);		// RGBARGBARGBARGBA replace alpha with 255
			vec_ste((vector unsigned int)colorChar,0,(unsigned int *)colors);	// store color
		}

		// build a list of triangles that need light
		numIndexes = 0;
		for ( i = 0 ; i < tess.numIndexes ; i += 3 ) {
			int		a, b, c;

			a = tess.indexes[i];
			b = tess.indexes[i+1];
			c = tess.indexes[i+2];
			if ( clipBits[a] & clipBits[b] & clipBits[c] ) {
				continue;	// not lighted
			}
			hitIndexes[numIndexes] = a;
			hitIndexes[numIndexes+1] = b;
			hitIndexes[numIndexes+2] = c;
			numIndexes += 3;
		}

		if ( !numIndexes ) {
			continue;
		}

		qglEnableClientState( GL_TEXTURE_COORD_ARRAY );
		qglTexCoordPointer( 2, GL_FLOAT, 0, texCoordsArray[0] );

		qglEnableClientState( GL_COLOR_ARRAY );
		qglColorPointer( 4, GL_UNSIGNED_BYTE, 0, colorArray );

		GL_Bind( tr.dlightImage );
		// include GLS_DEPTHFUNC_EQUAL so alpha tested surfaces don't add light
		// where they aren't rendered
		if ( dl->additive ) {
			GL_State( GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE | GLS_DEPTHFUNC_EQUAL );
		}
		else {
			GL_State( GLS_SRCBLEND_DST_COLOR | GLS_DSTBLEND_ONE | GLS_DEPTHFUNC_EQUAL );
		}
		R_DrawElements( numIndexes, hitIndexes );
		backEnd.pc.c_totalIndexes += numIndexes;
		backEnd.pc.c_dlightIndexes += numIndexes;
	}
}
Code example #30
void jsimd_rgb_gray_convert_altivec (JDIMENSION img_width,
                                     JSAMPARRAY input_buf,
                                     JSAMPIMAGE output_buf,
                                     JDIMENSION output_row, int num_rows)
{
  JSAMPROW inptr, outptr;
  int pitch;
  __vector unsigned char rgb0, rgb1 = {0}, rgb2 = {0}, rgb3 = {0}, rgbg0,
    rgbg1, rgbg2, rgbg3, y;
#if RGB_PIXELSIZE == 4
  __vector unsigned char rgb4;
#endif
  __vector short rg0, rg1, rg2, rg3, bg0, bg1, bg2, bg3;
  __vector unsigned short y01, y23;
  __vector int y0, y1, y2, y3;

  /* Constants */
  __vector short pw_f0299_f0337 = { __4X2(F_0_299, F_0_337) },
    pw_f0114_f0250 = { __4X2(F_0_114, F_0_250) };
  __vector int pd_onehalf = { __4X(ONE_HALF) };
  __vector unsigned char zero = { __16X(0) },
    shift_pack_index =
      { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29};

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr = output_buf[0][output_row];
    output_row++;

    for (pitch = img_width * RGB_PIXELSIZE; pitch > 0;
         pitch -= RGB_PIXELSIZE * 16, inptr += RGB_PIXELSIZE * 16,
         outptr += 16) {

#if RGB_PIXELSIZE == 3
      /* Load 16 pixels == 48 bytes */
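      /* AltiVec vec_ld() always reads from a 16-byte-aligned address, so
       * when inptr is unaligned the row is fetched with one extra load and
       * realigned with vec_perm() below.  When the next chunk would start at
       * or beyond the end of the remaining row bytes, vec_ld(-1, inptr +
       * pitch) re-reads the aligned chunk containing the last byte of the
       * row instead, avoiding a load past the end of the buffer.  (The
       * RGB_PIXELSIZE == 4 path further down uses the same scheme with one
       * more vector.) */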
      if ((size_t)inptr & 15) {
        __vector unsigned char unaligned_shift_index;
        rgb0 = vec_ld(0, inptr);
        if (pitch > 16)
          rgb1 = vec_ld(16, inptr);
        else
          rgb1 = vec_ld(-1, inptr + pitch);
        if (pitch > 32)
          rgb2 = vec_ld(32, inptr);
        else
          rgb2 = vec_ld(-1, inptr + pitch);
        if (pitch > 48)
          rgb3 = vec_ld(48, inptr);
        else
          rgb3 = vec_ld(-1, inptr + pitch);
        unaligned_shift_index = vec_lvsl(0, inptr);
        rgb0 = vec_perm(rgb0, rgb1, unaligned_shift_index);
        rgb1 = vec_perm(rgb1, rgb2, unaligned_shift_index);
        rgb2 = vec_perm(rgb2, rgb3, unaligned_shift_index);
      } else {
        rgb0 = vec_ld(0, inptr);
        if (pitch > 16)
          rgb1 = vec_ld(16, inptr);
        if (pitch > 32)
          rgb2 = vec_ld(32, inptr);
      }

      /* rgb0 = R0 G0 B0 R1 G1 B1 R2 G2 B2 R3 G3 B3 R4 G4 B4 R5
       * rgb1 = G5 B5 R6 G6 B6 R7 G7 B7 R8 G8 B8 R9 G9 B9 Ra Ga
       * rgb2 = Ba Rb Gb Bb Rc Gc Bc Rd Gd Bd Re Ge Be Rf Gf Bf
       *
       * rgbg0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 G0 B1 G1 B2 G2 B3 G3
       * rgbg1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 G4 B5 G5 B6 G6 B7 G7
       * rgbg2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 G8 B9 G9 Ba Ga Bb Gb
       * rgbg3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Gc Bd Gd Be Ge Bf Gf
       */
      rgbg0 = vec_perm(rgb0, rgb0, (__vector unsigned char)RGBG_INDEX0);
      rgbg1 = vec_perm(rgb0, rgb1, (__vector unsigned char)RGBG_INDEX1);
      rgbg2 = vec_perm(rgb1, rgb2, (__vector unsigned char)RGBG_INDEX2);
      rgbg3 = vec_perm(rgb2, rgb2, (__vector unsigned char)RGBG_INDEX3);
#else
      /* Load 16 pixels == 64 bytes */
      if ((size_t)inptr & 15) {
        __vector unsigned char unaligned_shift_index;
        rgb0 = vec_ld(0, inptr);
        if (pitch > 16)
          rgb1 = vec_ld(16, inptr);
        else
          rgb1 = vec_ld(-1, inptr + pitch);
        if (pitch > 32)
          rgb2 = vec_ld(32, inptr);
        else
          rgb2 = vec_ld(-1, inptr + pitch);
        if (pitch > 48)
          rgb3 = vec_ld(48, inptr);
        else
          rgb3 = vec_ld(-1, inptr + pitch);
        if (pitch > 64)
          rgb4 = vec_ld(64, inptr);
        else
          rgb4 = vec_ld(-1, inptr + pitch);
        unaligned_shift_index = vec_lvsl(0, inptr);
        rgb0 = vec_perm(rgb0, rgb1, unaligned_shift_index);
        rgb1 = vec_perm(rgb1, rgb2, unaligned_shift_index);
        rgb2 = vec_perm(rgb2, rgb3, unaligned_shift_index);
        rgb3 = vec_perm(rgb3, rgb4, unaligned_shift_index);
      } else {
        rgb0 = vec_ld(0, inptr);
        if (pitch > 16)
          rgb1 = vec_ld(16, inptr);
        if (pitch > 32)
          rgb2 = vec_ld(32, inptr);
        if (pitch > 48)
          rgb3 = vec_ld(48, inptr);
      }

      /* rgb0 = R0 G0 B0 X0 R1 G1 B1 X1 R2 G2 B2 X2 R3 G3 B3 X3
       * rgb1 = R4 G4 B4 X4 R5 G5 B5 X5 R6 G6 B6 X6 R7 G7 B7 X7
       * rgb2 = R8 G8 B8 X8 R9 G9 B9 X9 Ra Ga Ba Xa Rb Gb Bb Xb
       * rgb3 = Rc Gc Bc Xc Rd Gd Bd Xd Re Ge Be Xe Rf Gf Bf Xf
       *
       * rgbg0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 G0 B1 G1 B2 G2 B3 G3
       * rgbg1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 G4 B5 G5 B6 G6 B7 G7
       * rgbg2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 G8 B9 G9 Ba Ga Bb Gb
       * rgbg3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Gc Bd Gd Be Ge Bf Gf
       */
      rgbg0 = vec_perm(rgb0, rgb0, (__vector unsigned char)RGBG_INDEX);
      rgbg1 = vec_perm(rgb1, rgb1, (__vector unsigned char)RGBG_INDEX);
      rgbg2 = vec_perm(rgb2, rgb2, (__vector unsigned char)RGBG_INDEX);
      rgbg3 = vec_perm(rgb3, rgb3, (__vector unsigned char)RGBG_INDEX);
#endif

      /* rg0 = R0 G0 R1 G1 R2 G2 R3 G3
       * bg0 = B0 G0 B1 G1 B2 G2 B3 G3
       * ...
       *
       * NOTE: We have to use vec_merge*() here because vec_unpack*() doesn't
       * support unsigned vectors.
       */
      rg0 = (__vector signed short)vec_mergeh(zero, rgbg0);
      bg0 = (__vector signed short)vec_mergel(zero, rgbg0);
      rg1 = (__vector signed short)vec_mergeh(zero, rgbg1);
      bg1 = (__vector signed short)vec_mergel(zero, rgbg1);
      rg2 = (__vector signed short)vec_mergeh(zero, rgbg2);
      bg2 = (__vector signed short)vec_mergel(zero, rgbg2);
      rg3 = (__vector signed short)vec_mergeh(zero, rgbg3);
      bg3 = (__vector signed short)vec_mergel(zero, rgbg3);

      /* (Original)
       * Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B
       *
       * (This implementation)
       * Y  =  0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G
       */
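
      /* The green weight is split (0.58700 = 0.33700 + 0.25000) so that each
       * vec_msums() below multiplies one (R,G) or (B,G) pair per 32-bit
       * lane: two multiply-sum-accumulate calls per vector compute the full
       * weighted sum for four pixels, with pd_onehalf supplying the rounding
       * constant. */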

      /* Calculate Y values */

      y0 = vec_msums(rg0, pw_f0299_f0337, pd_onehalf);
      y1 = vec_msums(rg1, pw_f0299_f0337, pd_onehalf);
      y2 = vec_msums(rg2, pw_f0299_f0337, pd_onehalf);
      y3 = vec_msums(rg3, pw_f0299_f0337, pd_onehalf);
      y0 = vec_msums(bg0, pw_f0114_f0250, y0);
      y1 = vec_msums(bg1, pw_f0114_f0250, y1);
      y2 = vec_msums(bg2, pw_f0114_f0250, y2);
      y3 = vec_msums(bg3, pw_f0114_f0250, y3);
      /* Clever way to avoid 4 shifts + 2 packs.  This packs the high word from
       * each dword into a new 16-bit vector, which is the equivalent of
       * descaling the 32-bit results (right-shifting by 16 bits) and then
       * packing them.
       */
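      /* (shift_pack_index picks bytes 0-1 of each 32-bit lane, i.e. its high
       * 16 bits on the big-endian targets these index values assume.) */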
      y01 = vec_perm((__vector unsigned short)y0, (__vector unsigned short)y1,
                     shift_pack_index);
      y23 = vec_perm((__vector unsigned short)y2, (__vector unsigned short)y3,
                     shift_pack_index);
      y = vec_pack(y01, y23);
      vec_st(y, 0, outptr);
    }
  }
}