/* Color-dodge composite for RGBA8 buffers, AltiVec version.
 *
 * Per channel the math is D = A*256 / (256 - B), computed on 16-bit lanes:
 *   - A is sign-extended to 16 bits and shifted left 8; reinterpreted as
 *     unsigned through the union this is exactly A*256 (e.g. A=200 ->
 *     0xFFC8 << 8 = 0xC800 = 51200 = 200*256).
 *   - B is bitwise inverted (vec_nor(b,b) == 255-B), masked to 8 bits and
 *     incremented, giving the divisor 256-B (never zero).
 * The divides are done scalar through the union because AltiVec has no
 * integer divide; lanes 3 and 7 (the alpha channels of the two pixels in
 * each half) are deliberately skipped.  The result alpha is min(A.a, B.a),
 * spliced in with alphamask.
 *
 * ctx->A/B are the source pixel rows, ctx->D the destination,
 * ctx->n_pixels the pixel count (4 bytes per pixel).
 *
 * NOTE(review): alphamask, ox0008, ox00ff and the Load/Store helpers are
 * file-scope definitions not visible here — presumably an alpha-byte mask,
 * a splat-8 shift vector, a 0x00ff mask, and unaligned load/store wrappers.
 */
void gimp_composite_dodge_rgba8_rgba8_rgba8_altivec (GimpCompositeContext *ctx)
{
  const guchar *A = ctx->A;
  const guchar *B = ctx->B;
  guchar *D = ctx->D;
  guint length = ctx->n_pixels;
  vector unsigned char a,b,d;
  vector unsigned char alpha_a,alpha_b,alpha;
  vector signed short ox0001=vec_splat_s16(1);
  /* Union so the same register data can be shifted as signed shorts but
   * divided and packed as unsigned shorts. */
  union
    {
      vector signed short v;
      vector unsigned short vu;
      gushort u16[8];
    } ah,al,bh,bl;

  /* Main loop: 4 RGBA pixels (16 bytes) per iteration. */
  while (length >= 4)
    {
      a=LoadUnaligned(A);
      b=LoadUnaligned(B);

      /* Result alpha = min of the two source alphas. */
      alpha_a=vec_and(a, alphamask);
      alpha_b=vec_and(b, alphamask);
      alpha=vec_min(alpha_a, alpha_b);

      /* Numerator: A*256 in each 16-bit lane (see header comment). */
      ah.v=vec_unpackh((vector signed char)a);
      ah.v=vec_sl(ah.v,ox0008);
      al.v=vec_unpackl((vector signed char)a);
      al.v=vec_sl(al.v,ox0008);

      /* Denominator: 256 - B = ~B masked to 8 bits, plus 1. */
      b=vec_nor(b,b);
      bh.v=vec_unpackh((vector signed char)b);
      bh.v=vec_and(bh.v,ox00ff);
      bh.v=vec_add(bh.v,ox0001);
      bl.v=vec_unpackl((vector signed char)b);
      bl.v=vec_and(bl.v,ox00ff);
      bl.v=vec_add(bl.v,ox0001);

      /* Scalar divides; alpha lanes 3 and 7 are skipped on purpose. */
      ah.u16[0]=ah.u16[0]/bh.u16[0];
      ah.u16[1]=ah.u16[1]/bh.u16[1];
      ah.u16[2]=ah.u16[2]/bh.u16[2];
      ah.u16[4]=ah.u16[4]/bh.u16[4];
      ah.u16[5]=ah.u16[5]/bh.u16[5];
      ah.u16[6]=ah.u16[6]/bh.u16[6];
      al.u16[0]=al.u16[0]/bl.u16[0];
      al.u16[1]=al.u16[1]/bl.u16[1];
      al.u16[2]=al.u16[2]/bl.u16[2];
      al.u16[4]=al.u16[4]/bl.u16[4];
      al.u16[5]=al.u16[5]/bl.u16[5];
      al.u16[6]=al.u16[6]/bl.u16[6];

      /* Saturate back to bytes and splice in the computed alpha. */
      d=vec_packs(ah.vu,al.vu);
      d=vec_andc(d, alphamask);
      d=vec_or(d, alpha);
      StoreUnaligned(d, D);

      A+=16;
      B+=16;
      D+=16;
      length-=4;
    }

  /* Tail: same computation for the remaining 0-3 pixels, using the
   * partial load/store helpers (length converted to a byte count). */
  length = length*4;
  a=LoadUnalignedLess(A, length);
  b=LoadUnalignedLess(B, length);

  alpha_a=vec_and(a, alphamask);
  alpha_b=vec_and(b, alphamask);
  alpha=vec_min(alpha_a, alpha_b);

  ah.v=vec_unpackh((vector signed char)a);
  ah.v=vec_sl(ah.v,ox0008);
  al.v=vec_unpackl((vector signed char)a);
  al.v=vec_sl(al.v,ox0008);

  b=vec_nor(b,b);
  bh.v=vec_unpackh((vector signed char)b);
  bh.v=vec_and(bh.v,ox00ff);
  bh.v=vec_add(bh.v,ox0001);
  bl.v=vec_unpackl((vector signed char)b);
  bl.v=vec_and(bl.v,ox00ff);
  bl.v=vec_add(bl.v,ox0001);

  ah.u16[0]=ah.u16[0]/bh.u16[0];
  ah.u16[1]=ah.u16[1]/bh.u16[1];
  ah.u16[2]=ah.u16[2]/bh.u16[2];
  ah.u16[4]=ah.u16[4]/bh.u16[4];
  ah.u16[5]=ah.u16[5]/bh.u16[5];
  ah.u16[6]=ah.u16[6]/bh.u16[6];
  al.u16[0]=al.u16[0]/bl.u16[0];
  al.u16[1]=al.u16[1]/bl.u16[1];
  al.u16[2]=al.u16[2]/bl.u16[2];
  al.u16[4]=al.u16[4]/bl.u16[4];
  al.u16[5]=al.u16[5]/bl.u16[5];
  al.u16[6]=al.u16[6]/bl.u16[6];

  d=vec_packs(ah.vu,al.vu);
  d=vec_andc(d, alphamask);
  d=vec_or(d, alpha);
  StoreUnalignedLess(d, D, length);
}
/* Forward DCT + quantization of one 8x8 block using AltiVec.
 *
 * The DCT is done in single-precision floating point (a vectorized
 * translation of the classic AAN/IJG integer DCT, with the DESCALE
 * multiplies folded into vec_madd), and the data stays in vector
 * registers between the DCT and the quantize step.
 *
 * s       codec context: supplies quant matrices, bias, min/max
 *         coefficient clamps, scantables and the IDCT permutation.
 * data    8x8 block of DCTELEMs, transformed and quantized in place.
 * n       block index; selects the luma vs chroma DC scale for intra.
 * qscale  quantizer scale used to select the quantization matrix.
 * overflow NOTE(review): never written by this implementation; callers
 *          that read *overflow must obtain it elsewhere — confirm against
 *          the scalar reference dct_quantize.
 *
 * Returns the largest scantable index holding a non-zero coefficient
 * (lastNonZero), or a value <= 0 when the block quantized to all zeros
 * past DC.
 *
 * Fix: the overflow clamp below previously skipped data3, so row 3
 * coefficients were stored without being limited to [min_qcoeff,
 * max_qcoeff]; the missing vec_max/vec_min pair has been added.
 */
int dct_quantize_altivec(MpegEncContext* s,
                         DCTELEM* data, int n,
                         int qscale, int* overflow)
{
    int lastNonZero;
    vector float row0, row1, row2, row3, row4, row5, row6, row7;
    vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
    const vector float zero = {FOUR_INSTANCES(0.0f)};

    // Load the data into the row/alt vectors
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_ld(0, data);
        data1 = vec_ld(16, data);
        data2 = vec_ld(32, data);
        data3 = vec_ld(48, data);
        data4 = vec_ld(64, data);
        data5 = vec_ld(80, data);
        data6 = vec_ld(96, data);
        data7 = vec_ld(112, data);

        // Transpose the data before we start
        TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);

        // load the data into floating point vectors.  We load
        // the high half of each row into the main row vectors
        // and the low half into the alt vectors.
        row0 = vec_ctf(vec_unpackh(data0), 0);
        alt0 = vec_ctf(vec_unpackl(data0), 0);
        row1 = vec_ctf(vec_unpackh(data1), 0);
        alt1 = vec_ctf(vec_unpackl(data1), 0);
        row2 = vec_ctf(vec_unpackh(data2), 0);
        alt2 = vec_ctf(vec_unpackl(data2), 0);
        row3 = vec_ctf(vec_unpackh(data3), 0);
        alt3 = vec_ctf(vec_unpackl(data3), 0);
        row4 = vec_ctf(vec_unpackh(data4), 0);
        alt4 = vec_ctf(vec_unpackl(data4), 0);
        row5 = vec_ctf(vec_unpackh(data5), 0);
        alt5 = vec_ctf(vec_unpackl(data5), 0);
        row6 = vec_ctf(vec_unpackh(data6), 0);
        alt6 = vec_ctf(vec_unpackl(data6), 0);
        row7 = vec_ctf(vec_unpackh(data7), 0);
        alt7 = vec_ctf(vec_unpackl(data7), 0);
    }

    // The following block could exist as a separate altivec dct
    // function.  However, if we put it inline, the DCT data can remain
    // in the vector local variables, as floats, which we'll use during the
    // quantize step...
    {
        const vector float vec_0_298631336 = {FOUR_INSTANCES(0.298631336f)};
        const vector float vec_0_390180644 = {FOUR_INSTANCES(-0.390180644f)};
        const vector float vec_0_541196100 = {FOUR_INSTANCES(0.541196100f)};
        const vector float vec_0_765366865 = {FOUR_INSTANCES(0.765366865f)};
        const vector float vec_0_899976223 = {FOUR_INSTANCES(-0.899976223f)};
        const vector float vec_1_175875602 = {FOUR_INSTANCES(1.175875602f)};
        const vector float vec_1_501321110 = {FOUR_INSTANCES(1.501321110f)};
        const vector float vec_1_847759065 = {FOUR_INSTANCES(-1.847759065f)};
        const vector float vec_1_961570560 = {FOUR_INSTANCES(-1.961570560f)};
        const vector float vec_2_053119869 = {FOUR_INSTANCES(2.053119869f)};
        const vector float vec_2_562915447 = {FOUR_INSTANCES(-2.562915447f)};
        const vector float vec_3_072711026 = {FOUR_INSTANCES(3.072711026f)};

        int whichPass, whichHalf;

        // Two passes (rows then columns); each pass processes the high
        // and low halves of the data, swapping through the alt registers.
        for (whichPass = 1; whichPass <= 2; whichPass++) {
            for (whichHalf = 1; whichHalf <= 2; whichHalf++) {
                vector float tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
                vector float tmp10, tmp11, tmp12, tmp13;
                vector float z1, z2, z3, z4, z5;

                tmp0 = vec_add(row0, row7); // tmp0 = dataptr[0] + dataptr[7];
                tmp7 = vec_sub(row0, row7); // tmp7 = dataptr[0] - dataptr[7];
                tmp3 = vec_add(row3, row4); // tmp3 = dataptr[3] + dataptr[4];
                tmp4 = vec_sub(row3, row4); // tmp4 = dataptr[3] - dataptr[4];
                tmp1 = vec_add(row1, row6); // tmp1 = dataptr[1] + dataptr[6];
                tmp6 = vec_sub(row1, row6); // tmp6 = dataptr[1] - dataptr[6];
                tmp2 = vec_add(row2, row5); // tmp2 = dataptr[2] + dataptr[5];
                tmp5 = vec_sub(row2, row5); // tmp5 = dataptr[2] - dataptr[5];

                tmp10 = vec_add(tmp0, tmp3); // tmp10 = tmp0 + tmp3;
                tmp13 = vec_sub(tmp0, tmp3); // tmp13 = tmp0 - tmp3;
                tmp11 = vec_add(tmp1, tmp2); // tmp11 = tmp1 + tmp2;
                tmp12 = vec_sub(tmp1, tmp2); // tmp12 = tmp1 - tmp2;

                // dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS);
                row0 = vec_add(tmp10, tmp11);

                // dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);
                row4 = vec_sub(tmp10, tmp11);

                // z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
                z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero);

                // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
                //                                CONST_BITS-PASS1_BITS);
                row2 = vec_madd(tmp13, vec_0_765366865, z1);

                // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
                //                                CONST_BITS-PASS1_BITS);
                row6 = vec_madd(tmp12, vec_1_847759065, z1);

                z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7;
                z2 = vec_add(tmp5, tmp6); // z2 = tmp5 + tmp6;
                z3 = vec_add(tmp4, tmp6); // z3 = tmp4 + tmp6;
                z4 = vec_add(tmp5, tmp7); // z4 = tmp5 + tmp7;

                // z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
                z5 = vec_madd(vec_add(z3, z4), vec_1_175875602, (vector float)zero);

                // z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
                z3 = vec_madd(z3, vec_1_961570560, z5);

                // z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
                z4 = vec_madd(z4, vec_0_390180644, z5);

                // The following adds are rolled into the multiplies above:
                //   z3 += z5;  z4 += z5;
                // z2 = MULTIPLY(z2, - FIX_2_562915447) and
                // z1 = MULTIPLY(z1, - FIX_0_899976223) are likewise rolled
                // into the madds below, even though each multiply then
                // happens twice.

                // tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
                // dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
                row7 = vec_madd(tmp4, vec_0_298631336, vec_madd(z1, vec_0_899976223, z3));

                // tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
                // dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);
                row5 = vec_madd(tmp5, vec_2_053119869, vec_madd(z2, vec_2_562915447, z4));

                // tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
                // dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
                row3 = vec_madd(tmp6, vec_3_072711026, vec_madd(z2, vec_2_562915447, z3));

                // tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
                // dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS);
                row1 = vec_madd(z1, vec_0_899976223, vec_madd(tmp7, vec_1_501321110, z4));

                // Swap the row values with the alts.  If this is the first half,
                // this sets up the low values to be acted on in the second half.
                // If this is the second half, it puts the high values back in
                // the row values where they are expected to be when we're done.
                SWAP(row0, alt0);
                SWAP(row1, alt1);
                SWAP(row2, alt2);
                SWAP(row3, alt3);
                SWAP(row4, alt4);
                SWAP(row5, alt5);
                SWAP(row6, alt6);
                SWAP(row7, alt7);
            }

            if (whichPass == 1) {
                // transpose the data for the second pass

                // First, block transpose the upper right with lower left.
                SWAP(row4, alt0);
                SWAP(row5, alt1);
                SWAP(row6, alt2);
                SWAP(row7, alt3);

                // Now, transpose each block of four
                TRANSPOSE4(row0, row1, row2, row3);
                TRANSPOSE4(row4, row5, row6, row7);
                TRANSPOSE4(alt0, alt1, alt2, alt3);
                TRANSPOSE4(alt4, alt5, alt6, alt7);
            }
        }
    }

    // used after quantise step
    int oldBaseValue = 0;

    // perform the quantise step, using the floating point data
    // still in the row/alt registers
    {
        const int* biasAddr;
        const vector signed int* qmat;
        vector float bias, negBias;

        if (s->mb_intra) {
            vector signed int baseVector;

            // We must cache element 0 in the intra case
            // (it needs special handling).
            baseVector = vec_cts(vec_splat(row0, 0), 0);
            vec_ste(baseVector, 0, &oldBaseValue);

            qmat = (vector signed int*)s->q_intra_matrix[qscale];
            biasAddr = &(s->intra_quant_bias);
        } else {
            qmat = (vector signed int*)s->q_inter_matrix[qscale];
            biasAddr = &(s->inter_quant_bias);
        }

        // Load the bias vector (We add 0.5 to the bias so that we're
        // rounding when we convert to int, instead of flooring.)
        {
            vector signed int biasInt;
            const vector float negOneFloat = (vector float)(FOUR_INSTANCES(-1.0f));
            LOAD4(biasInt, biasAddr);
            bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT);
            negBias = vec_madd(bias, negOneFloat, zero);
        }

        {
            vector float q0, q1, q2, q3, q4, q5, q6, q7;

            // Even qmat entries quantize the high halves (row vectors);
            // vec_sel picks +bias for positive inputs, -bias otherwise.
            q0 = vec_ctf(qmat[0], QMAT_SHIFT);
            q1 = vec_ctf(qmat[2], QMAT_SHIFT);
            q2 = vec_ctf(qmat[4], QMAT_SHIFT);
            q3 = vec_ctf(qmat[6], QMAT_SHIFT);
            q4 = vec_ctf(qmat[8], QMAT_SHIFT);
            q5 = vec_ctf(qmat[10], QMAT_SHIFT);
            q6 = vec_ctf(qmat[12], QMAT_SHIFT);
            q7 = vec_ctf(qmat[14], QMAT_SHIFT);

            row0 = vec_sel(vec_madd(row0, q0, negBias), vec_madd(row0, q0, bias),
                           vec_cmpgt(row0, zero));
            row1 = vec_sel(vec_madd(row1, q1, negBias), vec_madd(row1, q1, bias),
                           vec_cmpgt(row1, zero));
            row2 = vec_sel(vec_madd(row2, q2, negBias), vec_madd(row2, q2, bias),
                           vec_cmpgt(row2, zero));
            row3 = vec_sel(vec_madd(row3, q3, negBias), vec_madd(row3, q3, bias),
                           vec_cmpgt(row3, zero));
            row4 = vec_sel(vec_madd(row4, q4, negBias), vec_madd(row4, q4, bias),
                           vec_cmpgt(row4, zero));
            row5 = vec_sel(vec_madd(row5, q5, negBias), vec_madd(row5, q5, bias),
                           vec_cmpgt(row5, zero));
            row6 = vec_sel(vec_madd(row6, q6, negBias), vec_madd(row6, q6, bias),
                           vec_cmpgt(row6, zero));
            row7 = vec_sel(vec_madd(row7, q7, negBias), vec_madd(row7, q7, bias),
                           vec_cmpgt(row7, zero));

            // Odd qmat entries quantize the low halves (alt vectors).
            q0 = vec_ctf(qmat[1], QMAT_SHIFT);
            q1 = vec_ctf(qmat[3], QMAT_SHIFT);
            q2 = vec_ctf(qmat[5], QMAT_SHIFT);
            q3 = vec_ctf(qmat[7], QMAT_SHIFT);
            q4 = vec_ctf(qmat[9], QMAT_SHIFT);
            q5 = vec_ctf(qmat[11], QMAT_SHIFT);
            q6 = vec_ctf(qmat[13], QMAT_SHIFT);
            q7 = vec_ctf(qmat[15], QMAT_SHIFT);

            alt0 = vec_sel(vec_madd(alt0, q0, negBias), vec_madd(alt0, q0, bias),
                           vec_cmpgt(alt0, zero));
            alt1 = vec_sel(vec_madd(alt1, q1, negBias), vec_madd(alt1, q1, bias),
                           vec_cmpgt(alt1, zero));
            alt2 = vec_sel(vec_madd(alt2, q2, negBias), vec_madd(alt2, q2, bias),
                           vec_cmpgt(alt2, zero));
            alt3 = vec_sel(vec_madd(alt3, q3, negBias), vec_madd(alt3, q3, bias),
                           vec_cmpgt(alt3, zero));
            alt4 = vec_sel(vec_madd(alt4, q4, negBias), vec_madd(alt4, q4, bias),
                           vec_cmpgt(alt4, zero));
            alt5 = vec_sel(vec_madd(alt5, q5, negBias), vec_madd(alt5, q5, bias),
                           vec_cmpgt(alt5, zero));
            alt6 = vec_sel(vec_madd(alt6, q6, negBias), vec_madd(alt6, q6, bias),
                           vec_cmpgt(alt6, zero));
            alt7 = vec_sel(vec_madd(alt7, q7, negBias), vec_madd(alt7, q7, bias),
                           vec_cmpgt(alt7, zero));
        }
    }

    // Store the data back into the original block
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_pack(vec_cts(row0, 0), vec_cts(alt0, 0));
        data1 = vec_pack(vec_cts(row1, 0), vec_cts(alt1, 0));
        data2 = vec_pack(vec_cts(row2, 0), vec_cts(alt2, 0));
        data3 = vec_pack(vec_cts(row3, 0), vec_cts(alt3, 0));
        data4 = vec_pack(vec_cts(row4, 0), vec_cts(alt4, 0));
        data5 = vec_pack(vec_cts(row5, 0), vec_cts(alt5, 0));
        data6 = vec_pack(vec_cts(row6, 0), vec_cts(alt6, 0));
        data7 = vec_pack(vec_cts(row7, 0), vec_cts(alt7, 0));

        {
            // Clamp for overflow
            vector signed int max_q_int, min_q_int;
            vector signed short max_q, min_q;

            LOAD4(max_q_int, &(s->max_qcoeff));
            LOAD4(min_q_int, &(s->min_qcoeff));

            max_q = vec_pack(max_q_int, max_q_int);
            min_q = vec_pack(min_q_int, min_q_int);

            data0 = vec_max(vec_min(data0, max_q), min_q);
            data1 = vec_max(vec_min(data1, max_q), min_q);
            data2 = vec_max(vec_min(data2, max_q), min_q);
            // Bug fix: data3 was previously not clamped at all.
            data3 = vec_max(vec_min(data3, max_q), min_q);
            data4 = vec_max(vec_min(data4, max_q), min_q);
            data5 = vec_max(vec_min(data5, max_q), min_q);
            data6 = vec_max(vec_min(data6, max_q), min_q);
            data7 = vec_max(vec_min(data7, max_q), min_q);
        }

        vector bool char zero_01, zero_23, zero_45, zero_67;
        vector signed char scanIndices_01, scanIndices_23, scanIndices_45, scanIndices_67;
        vector signed char negOne = vec_splat_s8(-1);
        vector signed char* scanPtr = (vector signed char*)(s->intra_scantable.inverse);

        // Determine the largest non-zero index: mask zero coefficients to
        // -1 in the inverse scantable, then reduce with vec_max.
        zero_01 = vec_pack(vec_cmpeq(data0, (vector short)zero),
                           vec_cmpeq(data1, (vector short)zero));
        zero_23 = vec_pack(vec_cmpeq(data2, (vector short)zero),
                           vec_cmpeq(data3, (vector short)zero));
        zero_45 = vec_pack(vec_cmpeq(data4, (vector short)zero),
                           vec_cmpeq(data5, (vector short)zero));
        zero_67 = vec_pack(vec_cmpeq(data6, (vector short)zero),
                           vec_cmpeq(data7, (vector short)zero));

        // 64 biggest values
        scanIndices_01 = vec_sel(scanPtr[0], negOne, zero_01);
        scanIndices_23 = vec_sel(scanPtr[1], negOne, zero_23);
        scanIndices_45 = vec_sel(scanPtr[2], negOne, zero_45);
        scanIndices_67 = vec_sel(scanPtr[3], negOne, zero_67);

        // 32 largest values
        scanIndices_01 = vec_max(scanIndices_01, scanIndices_23);
        scanIndices_45 = vec_max(scanIndices_45, scanIndices_67);

        // 16 largest values
        scanIndices_01 = vec_max(scanIndices_01, scanIndices_45);

        // 8 largest values
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                                 vec_mergel(scanIndices_01, negOne));

        // 4 largest values
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                                 vec_mergel(scanIndices_01, negOne));

        // 2 largest values
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                                 vec_mergel(scanIndices_01, negOne));

        // largest value
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                                 vec_mergel(scanIndices_01, negOne));

        scanIndices_01 = vec_splat(scanIndices_01, 0);

        signed char lastNonZeroChar;
        vec_ste(scanIndices_01, 0, &lastNonZeroChar);
        lastNonZero = lastNonZeroChar;

        // While the data is still in vectors we check for the transpose IDCT
        // permute and handle it using the vector unit if we can.  This is the
        // permute used by the altivec idct, so it is common when using the
        // altivec dct.
        if ((lastNonZero > 0) &&
            (s->idct_permutation_type == FF_TRANSPOSE_IDCT_PERM)) {
            TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);
        }

        vec_st(data0, 0, data);
        vec_st(data1, 16, data);
        vec_st(data2, 32, data);
        vec_st(data3, 48, data);
        vec_st(data4, 64, data);
        vec_st(data5, 80, data);
        vec_st(data6, 96, data);
        vec_st(data7, 112, data);
    }

    // special handling of block[0]
    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                oldBaseValue /= s->y_dc_scale;
            else
                oldBaseValue /= s->c_dc_scale;
        }

        // Divide by 8, rounding the result
        data[0] = (oldBaseValue + 4) >> 3;
    }

    // We handled the tranpose permutation above and we don't
    // need to permute the "no" permutation case.
    if ((lastNonZero > 0) &&
        (s->idct_permutation_type != FF_TRANSPOSE_IDCT_PERM) &&
        (s->idct_permutation_type != FF_NO_IDCT_PERM)) {
        ff_block_permute(data, s->idct_permutation,
                         s->intra_scantable.scantable, lastNonZero);
    }

    return lastNonZero;
}
/* Grain-extract composite for RGBA8 buffers, AltiVec version.
 *
 * Per channel: D = clamp(A - B + 128).  The +128 offset is applied by
 * subtracting oxff80 (0xFF80 == -128 as signed short); vec_packsu then
 * saturates the 16-bit results back to unsigned bytes.  The destination
 * alpha is min(A.alpha, B.alpha), merged in via alphamask.
 */
void
gimp_composite_grain_extract_rgba8_rgba8_rgba8_altivec (GimpCompositeContext *ctx)
{
  const guchar *src_a = ctx->A;
  const guchar *src_b = ctx->B;
  guchar *dst = ctx->D;
  guint remaining = ctx->n_pixels;
  vector unsigned char va, vb, vd, mask_a, mask_b, mask_min;
  vector signed short hi_a, lo_a, hi_b, lo_b;

  /* Whole 16-byte chunks: four RGBA pixels per iteration. */
  for (; remaining >= 4; remaining -= 4)
    {
      va = LoadUnaligned(src_a);
      vb = LoadUnaligned(src_b);

      /* Destination alpha = min of the two source alphas. */
      mask_a = vec_and(va, alphamask);
      mask_b = vec_and(vb, alphamask);
      mask_min = vec_min(mask_a, mask_b);

      /* Widen A's bytes to 16-bit lanes (mask off sign extension). */
      hi_a = vec_unpackh((vector signed char)va);
      hi_a = vec_and(hi_a, ox00ff);
      lo_a = vec_unpackl((vector signed char)va);
      lo_a = vec_and(lo_a, ox00ff);

      /* Same widening for B. */
      hi_b = vec_unpackh((vector signed char)vb);
      hi_b = vec_and(hi_b, ox00ff);
      lo_b = vec_unpackl((vector signed char)vb);
      lo_b = vec_and(lo_b, ox00ff);

      /* A - B, then +128 via subtracting -128. */
      hi_a = vec_sub(hi_a, hi_b);
      lo_a = vec_sub(lo_a, lo_b);
      hi_a = vec_sub(hi_a, oxff80);
      lo_a = vec_sub(lo_a, oxff80);

      /* Saturate to bytes and splice in the alpha channel. */
      vd = vec_packsu(hi_a, lo_a);
      vd = vec_andc(vd, alphamask);
      vd = vec_or(vd, mask_min);
      StoreUnaligned(vd, dst);

      src_a += 16;
      src_b += 16;
      dst += 16;
    }

  /* Tail of 0-3 pixels, handled with the partial load/store helpers
   * (remaining converted from pixels to bytes). */
  remaining = remaining * 4;
  va = LoadUnalignedLess(src_a, remaining);
  vb = LoadUnalignedLess(src_b, remaining);

  mask_a = vec_and(va, alphamask);
  mask_b = vec_and(vb, alphamask);
  mask_min = vec_min(mask_a, mask_b);

  hi_a = vec_unpackh((vector signed char)va);
  hi_a = vec_and(hi_a, ox00ff);
  lo_a = vec_unpackl((vector signed char)va);
  lo_a = vec_and(lo_a, ox00ff);

  hi_b = vec_unpackh((vector signed char)vb);
  hi_b = vec_and(hi_b, ox00ff);
  lo_b = vec_unpackl((vector signed char)vb);
  lo_b = vec_and(lo_b, ox00ff);

  hi_a = vec_sub(hi_a, hi_b);
  lo_a = vec_sub(lo_a, lo_b);
  hi_a = vec_sub(hi_a, oxff80);
  lo_a = vec_sub(lo_a, oxff80);

  vd = vec_packsu(hi_a, lo_a);
  vd = vec_andc(vd, alphamask);
  vd = vec_or(vd, mask_min);
  StoreUnalignedLess(vd, dst, remaining);
}
/** Do inverse transform on 8x4 part of block.
 *
 * VC-1 inverse transform: 8-point transform horizontally (STEP8/SHIFT_HOR8,
 * defined elsewhere), then 4-point transform vertically (STEP4/SHIFT_VERT4),
 * and the result is added with unsigned saturation to the 8x4 pixel area at
 * dest (rows dest, dest+stride, ...).
 *
 * dest   - pixel destination; only 8 bytes per row are written (two
 *          vec_ste stores of 4 bytes each).
 * stride - byte distance between pixel rows.
 * block  - 8x8 coefficient block (DCTELEM); read-only here, 16-byte aligned
 *          loads via vec_ld.
 *
 * NOTE(review): t0..t7 appear unused here but are presumably referenced
 * inside the STEP8/STEP4 macros — confirm against the macro definitions.
 */
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, DCTELEM *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    // Shift-amount and rounding constants used by the STEP/SHIFT macros.
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;

    // Load the whole 8x8 coefficient block.
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    // Transpose so the horizontal 8-point transform runs on columns of
    // the register file.
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    // Widen to 32 bits: low halves into s0..s7, high halves into s8..sF.
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    // Horizontal 8-point pass (rounding constant 4).
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);

    // Narrow back to 16 bits and transpose back to row order.
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    // Vertical 4-point pass on the first four rows only (8x4 transform).
    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);

    // Alignment permutes for the (possibly unaligned) dest rows; merging
    // with p == 0xFF zero-extends the pixel bytes to 16-bit lanes inside
    // ADD's vec_perm.
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

    // Add one transformed row to 8 destination pixels with unsigned
    // saturation, storing as two 4-byte element stores.
#define ADD(dest,src,perm) \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
    tmp = vec_ld (0, dest); \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm); \
    tmp3 = vec_adds (tmp2, src); \
    tmp = vec_packsu (tmp3, tmp3); \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest); \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);

    // perm0/perm1 alternate because dest's alignment alternates with stride.
    ADD (dest, src0, perm0) dest += stride;
    ADD (dest, src1, perm1) dest += stride;
    ADD (dest, src2, perm0) dest += stride;
    ADD (dest, src3, perm1)
}
/* Sign-extend the high half of a vector of signed ints to signed
 * long longs (vec_unpackh on the high-order elements). */
vll_sign
vll_unpack_hi_2 (vi_sign a)
{
  vll_sign widened = vec_unpackh (a);
  return widened;
}
/* Widen the high half of a signed-int vector to signed long long lanes. */
vector signed long long
testsi_h (vector signed int vsi2)
{
  vector signed long long result = vec_unpackh (vsi2);
  return result;
}
/** Do inverse transform on 8x8 block.
 *
 * VC-1 8x8 inverse transform, in place on the coefficient block:
 * horizontal 8-point pass (STEP8/SHIFT_HOR8, defined elsewhere), transpose,
 * then vertical 8-point pass (STEP8/SHIFT_VERT8).  When rangered is set the
 * output is range-mapped: for unsigned mode (sign == 0) a bias of 64
 * (signed_bias) is subtracted first, then every value is doubled
 * (left shift by 1).
 *
 * block    - 8x8 DCTELEM coefficients, read and written via aligned vec_ld
 *            and vec_st.
 * sign     - selects signed vs unsigned range-reduction handling.
 * rangered - non-zero enables the range-reduction scaling step.
 *
 * NOTE(review): t0..t7 appear unused here but are presumably referenced
 * inside the STEP8 macros — confirm against the macro definitions.
 */
static void vc1_inv_trans_8x8_altivec(DCTELEM block[64], int sign, int rangered)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    // Shift-amount and rounding constants used by the STEP/SHIFT macros.
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    // Range-reduction constants: shift-by-1 and bias 64 (4 << 4).
    const vector unsigned short rangered_shift = vec_splat_u16(1);
    const vector signed short signed_bias = vec_sl(vec_splat_s16(4),
                                                   vec_splat_u16(4));

    // Load the whole 8x8 block.
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    // Widen to 32 bits: low halves in s0..s7, high halves in s8..sF.
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    // Horizontal pass (rounding constant 4).
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);

    // Narrow, transpose, re-widen for the vertical pass.
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    // Vertical pass (rounding constant 64).
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);

    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    // Optional VC-1 range reduction: (x - 64) * 2 when unsigned, x * 2
    // when signed.
    if (rangered) {
        if (!sign) {
            src0 = vec_sub(src0, signed_bias);
            src1 = vec_sub(src1, signed_bias);
            src2 = vec_sub(src2, signed_bias);
            src3 = vec_sub(src3, signed_bias);
            src4 = vec_sub(src4, signed_bias);
            src5 = vec_sub(src5, signed_bias);
            src6 = vec_sub(src6, signed_bias);
            src7 = vec_sub(src7, signed_bias);
        }
        src0 = vec_sl(src0, rangered_shift);
        src1 = vec_sl(src1, rangered_shift);
        src2 = vec_sl(src2, rangered_shift);
        src3 = vec_sl(src3, rangered_shift);
        src4 = vec_sl(src4, rangered_shift);
        src5 = vec_sl(src5, rangered_shift);
        src6 = vec_sl(src6, rangered_shift);
        src7 = vec_sl(src7, rangered_shift);
    }

    // Write the transformed block back in place.
    vec_st(src0,  0, block);
    vec_st(src1, 16, block);
    vec_st(src2, 32, block);
    vec_st(src3, 48, block);
    vec_st(src4, 64, block);
    vec_st(src5, 80, block);
    vec_st(src6, 96, block);
    vec_st(src7,112, block);
}
/* Widen the high half of a bool-int vector to bool long long lanes. */
vector bool long long
testbi_h (vector bool int vbi2)
{
  vector bool long long result = vec_unpackh (vbi2);
  return result;
}
inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, v_int32x4& c, v_int32x4& d) { c.val = vec_mul(vec_unpackh(a.val), vec_unpackh(b.val)); d.val = vec_mul(vec_unpackl(a.val), vec_unpackl(b.val)); }
/** Do inverse transform on 8x4 part of block.
 *
 * In-place VC-1 8x4 inverse transform: horizontal 8-point pass
 * (STEP8/SHIFT_HOR8, defined elsewhere) over the whole block, then a
 * vertical 4-point pass (STEP4/SHIFT_VERT4) on either the upper (n == 0)
 * or lower (n != 0) four rows; only the selected half is stored back.
 *
 * block - 8x8 DCTELEM coefficients, read and written via aligned
 *         vec_ld / vec_st.
 * n     - 0 transforms and stores rows 0-3, non-zero rows 4-7.
 *
 * NOTE(review): t0..t7 appear unused here but are presumably referenced
 * inside the STEP8/STEP4 macros — confirm against the macro definitions.
 */
static void vc1_inv_trans_8x4_altivec(DCTELEM block[64], int n)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    // Shift-amount and rounding constants used by the STEP/SHIFT macros.
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    // Load the whole 8x8 block.
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    // Transpose so the horizontal transform runs along register columns.
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    // Widen to 32 bits: low halves in s0..s7, high halves in s8..sF.
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    // Horizontal 8-point pass (rounding constant 4).
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);

    // Narrow back to 16 bits and transpose back to row order.
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    if(!n){ // upper half of block
        s0 = vec_unpackh(src0);
        s1 = vec_unpackh(src1);
        s2 = vec_unpackh(src2);
        s3 = vec_unpackh(src3);
        s8 = vec_unpackl(src0);
        s9 = vec_unpackl(src1);
        sA = vec_unpackl(src2);
        sB = vec_unpackl(src3);

        // Vertical 4-point pass (rounding constant 64).
        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);

        src0 = vec_pack(s0, s8);
        src1 = vec_pack(s1, s9);
        src2 = vec_pack(s2, sA);
        src3 = vec_pack(s3, sB);

        // Store only rows 0-3.
        vec_st(src0,  0, block);
        vec_st(src1, 16, block);
        vec_st(src2, 32, block);
        vec_st(src3, 48, block);
    } else { //lower half of block
        s0 = vec_unpackh(src4);
        s1 = vec_unpackh(src5);
        s2 = vec_unpackh(src6);
        s3 = vec_unpackh(src7);
        s8 = vec_unpackl(src4);
        s9 = vec_unpackl(src5);
        sA = vec_unpackl(src6);
        sB = vec_unpackl(src7);

        // Vertical 4-point pass (rounding constant 64).
        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);

        src4 = vec_pack(s0, s8);
        src5 = vec_pack(s1, s9);
        src6 = vec_pack(s2, sA);
        src7 = vec_pack(s3, sB);

        // Store only rows 4-7.
        vec_st(src4, 64, block);
        vec_st(src5, 80, block);
        vec_st(src6, 96, block);
        vec_st(src7,112, block);
    }
}
// CHECK-LABEL: define void @test1 void test1() { /* vec_cmpeq */ res_vbll = vec_cmpeq(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd // CHECK-LE: @llvm.ppc.altivec.vcmpequd // CHECK-PPC: error: call to 'vec_cmpeq' is ambiguous res_vbll = vec_cmpeq(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpequd // CHECK-LE: @llvm.ppc.altivec.vcmpequd // CHECK-PPC: error: call to 'vec_cmpeq' is ambiguous /* vec_cmpgt */ res_vbll = vec_cmpgt(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd // CHECK-PPC: error: call to 'vec_cmpgt' is ambiguous res_vbll = vec_cmpgt(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud // CHECK-LE: @llvm.ppc.altivec.vcmpgtud // CHECK-PPC: error: call to 'vec_cmpgt' is ambiguous /* ----------------------- predicates --------------------------- */ /* vec_all_eq */ res_i = vec_all_eq(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous res_i = vec_all_eq(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous res_i = vec_all_eq(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous res_i = vec_all_eq(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous res_i = vec_all_eq(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous res_i = vec_all_eq(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous res_i = vec_all_eq(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_eq' is 
ambiguous /* vec_all_ne */ res_i = vec_all_ne(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous res_i = vec_all_ne(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous res_i = vec_all_ne(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous res_i = vec_all_ne(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous res_i = vec_all_ne(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous res_i = vec_all_ne(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous res_i = vec_all_ne(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous /* vec_any_eq */ res_i = vec_any_eq(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous res_i = vec_any_eq(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous res_i = vec_any_eq(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous res_i = vec_any_eq(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous res_i = vec_any_eq(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: 
@llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous res_i = vec_any_eq(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous res_i = vec_any_eq(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous /* vec_any_ne */ res_i = vec_any_ne(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous res_i = vec_any_ne(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous res_i = vec_any_ne(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous res_i = vec_any_ne(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous res_i = vec_any_ne(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous res_i = vec_any_ne(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous res_i = vec_any_ne(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpequd.p // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous /* vec_all_ge */ res_i = vec_all_ge(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous res_i = vec_all_ge(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous res_i = vec_all_ge(vull, vull); 
// CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous res_i = vec_all_ge(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous res_i = vec_all_ge(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous res_i = vec_all_ge(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous res_i = vec_all_ge(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous /* vec_all_gt */ res_i = vec_all_gt(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous res_i = vec_all_gt(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous res_i = vec_all_gt(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous res_i = vec_all_gt(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous res_i = vec_all_gt(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous res_i = vec_all_gt(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous res_i = vec_all_gt(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_gt' is 
ambiguous /* vec_all_le */ res_i = vec_all_le(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_le' is ambiguous res_i = vec_all_le(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_le' is ambiguous res_i = vec_all_le(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_le' is ambiguous res_i = vec_all_le(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_le' is ambiguous res_i = vec_all_le(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_le' is ambiguous res_i = vec_all_le(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_le' is ambiguous res_i = vec_all_le(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_le' is ambiguous /* vec_all_lt */ res_i = vec_all_lt(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous res_i = vec_all_lt(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous res_i = vec_all_lt(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous res_i = vec_all_lt(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous res_i = vec_all_lt(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: 
@llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous res_i = vec_all_lt(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous res_i = vec_all_lt(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous /* vec_any_ge */ res_i = vec_any_ge(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous res_i = vec_any_ge(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous res_i = vec_any_ge(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous res_i = vec_any_ge(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous res_i = vec_any_ge(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous res_i = vec_any_ge(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous res_i = vec_any_ge(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous /* vec_any_gt */ res_i = vec_any_gt(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous res_i = vec_any_gt(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous res_i = vec_any_gt(vull, vull); 
// CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous res_i = vec_any_gt(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous res_i = vec_any_gt(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous res_i = vec_any_gt(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous res_i = vec_any_gt(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous /* vec_any_le */ res_i = vec_any_le(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_le' is ambiguous res_i = vec_any_le(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_le' is ambiguous res_i = vec_any_le(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_le' is ambiguous res_i = vec_any_le(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_le' is ambiguous res_i = vec_any_le(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_le' is ambiguous res_i = vec_any_le(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_le' is ambiguous res_i = vec_any_le(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_le' is 
ambiguous /* vec_any_lt */ res_i = vec_any_lt(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous res_i = vec_any_lt(vsll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous res_i = vec_any_lt(vull, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous res_i = vec_any_lt(vull, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous res_i = vec_any_lt(vbll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous res_i = vec_any_lt(vbll, vull); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous res_i = vec_any_lt(vbll, vbll); // CHECK: @llvm.ppc.altivec.vcmpgtud.p // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous /* vec_max */ res_vsll = vec_max(vsll, vsll); // CHECK: @llvm.ppc.altivec.vmaxsd // CHECK-LE: @llvm.ppc.altivec.vmaxsd // CHECK-PPC: error: call to 'vec_max' is ambiguous res_vsll = vec_max(vbll, vsll); // CHECK: @llvm.ppc.altivec.vmaxsd // CHECK-LE: @llvm.ppc.altivec.vmaxsd // CHECK-PPC: error: call to 'vec_max' is ambiguous res_vsll = vec_max(vsll, vbll); // CHECK: @llvm.ppc.altivec.vmaxsd // CHECK-LE: @llvm.ppc.altivec.vmaxsd // CHECK-PPC: error: call to 'vec_max' is ambiguous res_vull = vec_max(vull, vull); // CHECK: @llvm.ppc.altivec.vmaxud // CHECK-LE: @llvm.ppc.altivec.vmaxud // CHECK-PPC: error: call to 'vec_max' is ambiguous res_vull = vec_max(vbll, vull); // CHECK: @llvm.ppc.altivec.vmaxud // CHECK-LE: @llvm.ppc.altivec.vmaxud // CHECK-PPC: error: call to 'vec_max' is 
ambiguous res_vull = vec_max(vull, vbll); // CHECK: @llvm.ppc.altivec.vmaxud // CHECK-LE: @llvm.ppc.altivec.vmaxud // CHECK-PPC: error: call to 'vec_max' is ambiguous /* vec_min */ res_vsll = vec_min(vsll, vsll); // CHECK: @llvm.ppc.altivec.vminsd // CHECK-LE: @llvm.ppc.altivec.vminsd // CHECK-PPC: error: call to 'vec_min' is ambiguous res_vsll = vec_min(vbll, vsll); // CHECK: @llvm.ppc.altivec.vminsd // CHECK-LE: @llvm.ppc.altivec.vminsd // CHECK-PPC: error: call to 'vec_min' is ambiguous res_vsll = vec_min(vsll, vbll); // CHECK: @llvm.ppc.altivec.vminsd // CHECK-LE: @llvm.ppc.altivec.vminsd // CHECK-PPC: error: call to 'vec_min' is ambiguous res_vull = vec_min(vull, vull); // CHECK: @llvm.ppc.altivec.vminud // CHECK-LE: @llvm.ppc.altivec.vminud // CHECK-PPC: error: call to 'vec_min' is ambiguous res_vull = vec_min(vbll, vull); // CHECK: @llvm.ppc.altivec.vminud // CHECK-LE: @llvm.ppc.altivec.vminud // CHECK-PPC: error: call to 'vec_min' is ambiguous res_vull = vec_min(vull, vbll); // CHECK: @llvm.ppc.altivec.vminud // CHECK-LE: @llvm.ppc.altivec.vminud // CHECK-PPC: error: call to 'vec_min' is ambiguous /* vec_mule */ res_vsll = vec_mule(vi, vi); // CHECK: @llvm.ppc.altivec.vmulesw // CHECK-LE: @llvm.ppc.altivec.vmulosw // CHECK-PPC: error: call to 'vec_mule' is ambiguous res_vull = vec_mule(vui , vui); // CHECK: @llvm.ppc.altivec.vmuleuw // CHECK-LE: @llvm.ppc.altivec.vmulouw // CHECK-PPC: error: call to 'vec_mule' is ambiguous /* vec_mulo */ res_vsll = vec_mulo(vi, vi); // CHECK: @llvm.ppc.altivec.vmulosw // CHECK-LE: @llvm.ppc.altivec.vmulesw // CHECK-PPC: error: call to 'vec_mulo' is ambiguous res_vull = vec_mulo(vui, vui); // CHECK: @llvm.ppc.altivec.vmulouw // CHECK-LE: @llvm.ppc.altivec.vmuleuw // CHECK-PPC: error: call to 'vec_mulo' is ambiguous /* vec_packs */ res_vi = vec_packs(vsll, vsll); // CHECK: @llvm.ppc.altivec.vpksdss // CHECK-LE: @llvm.ppc.altivec.vpksdss // CHECK-PPC: error: call to 'vec_packs' is ambiguous res_vui = vec_packs(vull, vull); // 
CHECK: @llvm.ppc.altivec.vpkudus // CHECK-LE: @llvm.ppc.altivec.vpkudus // CHECK-PPC: error: call to 'vec_packs' is ambiguous /* vec_packsu */ res_vui = vec_packsu(vsll, vsll); // CHECK: @llvm.ppc.altivec.vpksdus // CHECK-LE: @llvm.ppc.altivec.vpksdus // CHECK-PPC: error: call to 'vec_packsu' is ambiguous res_vui = vec_packsu(vull, vull); // CHECK: @llvm.ppc.altivec.vpkudus // CHECK-LE: @llvm.ppc.altivec.vpkudus // CHECK-PPC: error: call to 'vec_packsu' is ambiguous /* vec_rl */ res_vsll = vec_rl(vsll, vull); // CHECK: @llvm.ppc.altivec.vrld // CHECK-LE: @llvm.ppc.altivec.vrld // CHECK-PPC: error: call to 'vec_rl' is ambiguous res_vull = vec_rl(vull, vull); // CHECK: @llvm.ppc.altivec.vrld // CHECK-LE: @llvm.ppc.altivec.vrld // CHECK-PPC: error: call to 'vec_rl' is ambiguous /* vec_sl */ res_vsll = vec_sl(vsll, vull); // CHECK: shl <2 x i64> // CHECK-LE: shl <2 x i64> // CHECK-PPC: error: call to 'vec_sl' is ambiguous res_vull = vec_sl(vull, vull); // CHECK: shl <2 x i64> // CHECK-LE: shl <2 x i64> // CHECK-PPC: error: call to 'vec_sl' is ambiguous /* vec_sr */ res_vsll = vec_sr(vsll, vull); // CHECK: ashr <2 x i64> // CHECK-LE: ashr <2 x i64> // CHECK-PPC: error: call to 'vec_sr' is ambiguous res_vull = vec_sr(vull, vull); // CHECK: lshr <2 x i64> // CHECK-LE: lshr <2 x i64> // CHECK-PPC: error: call to 'vec_sr' is ambiguous /* vec_sra */ res_vsll = vec_sra(vsll, vull); // CHECK: ashr <2 x i64> // CHECK-LE: ashr <2 x i64> // CHECK-PPC: error: call to 'vec_sra' is ambiguous res_vull = vec_sra(vull, vull); // CHECK: ashr <2 x i64> // CHECK-LE: ashr <2 x i64> // CHECK-PPC: error: call to 'vec_sra' is ambiguous /* vec_unpackh */ res_vsll = vec_unpackh(vi); // CHECK: llvm.ppc.altivec.vupkhsw // CHECK-LE: llvm.ppc.altivec.vupklsw // CHECK-PPC: error: call to 'vec_unpackh' is ambiguous res_vbll = vec_unpackh(vbi); // CHECK: llvm.ppc.altivec.vupkhsw // CHECK-LE: llvm.ppc.altivec.vupklsw // CHECK-PPC: error: call to 'vec_unpackh' is ambiguous /* vec_unpackl */ res_vsll = 
vec_unpackl(vi); // CHECK: llvm.ppc.altivec.vupklsw // CHECK-LE: llvm.ppc.altivec.vupkhsw // CHECK-PPC: error: call to 'vec_unpackl' is ambiguous res_vbll = vec_unpackl(vbi); // CHECK: llvm.ppc.altivec.vupklsw // CHECK-LE: llvm.ppc.altivec.vupkhsw // CHECK-PPC: error: call to 'vec_unpackl' is ambiguous /* vec_vpksdss */ res_vi = vec_vpksdss(vsll, vsll); // CHECK: llvm.ppc.altivec.vpksdss // CHECK-LE: llvm.ppc.altivec.vpksdss // CHECK-PPC: warning: implicit declaration of function 'vec_vpksdss' /* vec_vpksdus */ res_vui = vec_vpksdus(vsll, vsll); // CHECK: llvm.ppc.altivec.vpksdus // CHECK-LE: llvm.ppc.altivec.vpksdus // CHECK-PPC: warning: implicit declaration of function 'vec_vpksdus' /* vec_vpkudum */ res_vi = vec_vpkudum(vsll, vsll); // CHECK: vperm // CHECK-LE: vperm // CHECK-PPC: warning: implicit declaration of function 'vec_vpkudum' res_vui = vec_vpkudum(vull, vull); // CHECK: vperm // CHECK-LE: vperm res_vui = vec_vpkudus(vull, vull); // CHECK: llvm.ppc.altivec.vpkudus // CHECK-LE: llvm.ppc.altivec.vpkudus // CHECK-PPC: warning: implicit declaration of function 'vec_vpkudus' /* vec_vupkhsw */ res_vsll = vec_vupkhsw(vi); // CHECK: llvm.ppc.altivec.vupkhsw // CHECK-LE: llvm.ppc.altivec.vupklsw // CHECK-PPC: warning: implicit declaration of function 'vec_vupkhsw' res_vbll = vec_vupkhsw(vbi); // CHECK: llvm.ppc.altivec.vupkhsw // CHECK-LE: llvm.ppc.altivec.vupklsw /* vec_vupklsw */ res_vsll = vec_vupklsw(vi); // CHECK: llvm.ppc.altivec.vupklsw // CHECK-LE: llvm.ppc.altivec.vupkhsw // CHECK-PPC: warning: implicit declaration of function 'vec_vupklsw' res_vbll = vec_vupklsw(vbi); // CHECK: llvm.ppc.altivec.vupklsw // CHECK-LE: llvm.ppc.altivec.vupkhsw /* vec_max */ res_vsll = vec_max(vsll, vsll); // CHECK: @llvm.ppc.altivec.vmaxsd // CHECK-LE: @llvm.ppc.altivec.vmaxsd res_vsll = vec_max(vbll, vsll); // CHECK: @llvm.ppc.altivec.vmaxsd // CHECK-LE: @llvm.ppc.altivec.vmaxsd res_vsll = vec_max(vsll, vbll); // CHECK: @llvm.ppc.altivec.vmaxsd // CHECK-LE: 
@llvm.ppc.altivec.vmaxsd res_vull = vec_max(vull, vull); // CHECK: @llvm.ppc.altivec.vmaxud // CHECK-LE: @llvm.ppc.altivec.vmaxud res_vull = vec_max(vbll, vull); // CHECK: @llvm.ppc.altivec.vmaxud // CHECK-LE: @llvm.ppc.altivec.vmaxud /* vec_min */ res_vsll = vec_min(vsll, vsll); // CHECK: @llvm.ppc.altivec.vminsd // CHECK-LE: @llvm.ppc.altivec.vminsd res_vsll = vec_min(vbll, vsll); // CHECK: @llvm.ppc.altivec.vminsd // CHECK-LE: @llvm.ppc.altivec.vminsd res_vsll = vec_min(vsll, vbll); // CHECK: @llvm.ppc.altivec.vminsd // CHECK-LE: @llvm.ppc.altivec.vminsd res_vull = vec_min(vull, vull); // CHECK: @llvm.ppc.altivec.vminud // CHECK-LE: @llvm.ppc.altivec.vminud res_vull = vec_min(vbll, vull); // CHECK: @llvm.ppc.altivec.vminud // CHECK-LE: @llvm.ppc.altivec.vminud }
/* Exercise vec_unpackh / vec_unpackl on every element type they accept
   here (signed char, bool char, pixel, signed short, bool short) and
   compare each result against a precomputed expected vector.

   The expected vectors are chosen per byte order: on little-endian the
   "high" and "low" expectations are swapped relative to big-endian,
   mirroring how the unpack intrinsics map onto the underlying
   instructions for each endianness.

   NOTE(review): BIG and check() are defined elsewhere in this file;
   from the tables below BIG is presumably the all-ones 32-bit
   bool-int value -- confirm against their definitions.  */
static void test() {
  /* Input vectors. */
  vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
  vector bool char vbc = {0,255,255,0,0,0,255,0,255,0,0,255,255,255,0,255};
  /* Each pixel element packs 1/5/5/5-bit fields: a<<15 | r<<10 | g<<5 | b. */
  vector pixel vp = {(0<<15) + (1<<10) + (2<<5) + 3,
                     (1<<15) + (4<<10) + (5<<5) + 6,
                     (0<<15) + (7<<10) + (8<<5) + 9,
                     (1<<15) + (10<<10) + (11<<5) + 12,
                     (1<<15) + (13<<10) + (14<<5) + 15,
                     (0<<15) + (16<<10) + (17<<5) + 18,
                     (1<<15) + (19<<10) + (20<<5) + 21,
                     (0<<15) + (22<<10) + (23<<5) + 24};
  vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
  vector bool short vbs = {0,65535,65535,0,0,0,65535,0};

  /* Result vectors. */
  vector signed short vsch, vscl;
  vector bool short vbsh, vbsl;
  vector unsigned int vuih, vuil;
  vector signed int vsih, vsil;
  vector bool int vbih, vbil;

  /* Expected result vectors.  Unpacking doubles each element's width:
     bool elements widen to full-width masks (255 -> 65535, 65535 -> BIG),
     signed elements are sign-extended, and each pixel's 1/5/5/5 fields
     land in the four bytes of the 32-bit result (the <<24/<<16/<<8
     terms below place the alpha/r/g/b fields byte by byte).  */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  /* Little-endian expectations: high/low halves swapped vs. big-endian. */
  vector signed short vschr = {0,1,2,3,4,5,6,7};
  vector signed short vsclr = {-8,-7,-6,-5,-4,-3,-2,-1};
  vector bool short vbshr = {65535,0,0,65535,65535,65535,0,65535};
  vector bool short vbslr = {0,65535,65535,0,0,0,65535,0};
  vector unsigned int vuihr = {(65535<<24) + (13<<16) + (14<<8) + 15,
                               (0<<24) + (16<<16) + (17<<8) + 18,
                               (65535<<24) + (19<<16) + (20<<8) + 21,
                               (0<<24) + (22<<16) + (23<<8) + 24};
  vector unsigned int vuilr = {(0<<24) + (1<<16) + (2<<8) + 3,
                               (65535<<24) + (4<<16) + (5<<8) + 6,
                               (0<<24) + (7<<16) + (8<<8) + 9,
                               (65535<<24) + (10<<16) + (11<<8) + 12};
  vector signed int vsihr = {0,1,2,3};
  vector signed int vsilr = {-4,-3,-2,-1};
  vector bool int vbihr = {0,0,BIG,0};
  vector bool int vbilr = {0,BIG,BIG,0};
#else
  /* Big-endian expectations: unpackh takes the first half of the input,
     unpackl the second half, in element order.  */
  vector signed short vschr = {-8,-7,-6,-5,-4,-3,-2,-1};
  vector signed short vsclr = {0,1,2,3,4,5,6,7};
  vector bool short vbshr = {0,65535,65535,0,0,0,65535,0};
  vector bool short vbslr = {65535,0,0,65535,65535,65535,0,65535};
  vector unsigned int vuihr = {(0<<24) + (1<<16) + (2<<8) + 3,
                               (65535<<24) + (4<<16) + (5<<8) + 6,
                               (0<<24) + (7<<16) + (8<<8) + 9,
                               (65535<<24) + (10<<16) + (11<<8) + 12};
  vector unsigned int vuilr = {(65535<<24) + (13<<16) + (14<<8) + 15,
                               (0<<24) + (16<<16) + (17<<8) + 18,
                               (65535<<24) + (19<<16) + (20<<8) + 21,
                               (0<<24) + (22<<16) + (23<<8) + 24};
  vector signed int vsihr = {-4,-3,-2,-1};
  vector signed int vsilr = {0,1,2,3};
  vector bool int vbihr = {0,BIG,BIG,0};
  vector bool int vbilr = {0,0,BIG,0};
#endif

  /* Perform the unpacks under test. */
  vsch = vec_unpackh (vsc);
  vscl = vec_unpackl (vsc);
  vbsh = vec_unpackh (vbc);
  vbsl = vec_unpackl (vbc);
  vuih = vec_unpackh (vp);
  vuil = vec_unpackl (vp);
  vsih = vec_unpackh (vss);
  vsil = vec_unpackl (vss);
  vbih = vec_unpackh (vbs);
  vbil = vec_unpackl (vbs);

  /* Compare every result against its expected vector; the string tag
     names the failing result for diagnostics.  */
  check (vec_all_eq (vsch, vschr), "vsch");
  check (vec_all_eq (vscl, vsclr), "vscl");
  check (vec_all_eq (vbsh, vbshr), "vbsh");
  check (vec_all_eq (vbsl, vbslr), "vbsl");
  check (vec_all_eq (vuih, vuihr), "vuih");
  check (vec_all_eq (vuil, vuilr), "vuil");
  check (vec_all_eq (vsih, vsihr), "vsih");
  check (vec_all_eq (vsil, vsilr), "vsil");
  check (vec_all_eq (vbih, vbihr), "vbih");
  check (vec_all_eq (vbil, vbilr), "vbil");
}